
Commit 843de0c

Merge pull request #157 from Chainlit/clement/eng-2181-otel-exporter
Clement/eng 2181 otel exporter
2 parents cd3dc94 + f381baf commit 843de0c

12 files changed: +497 −13 lines changed

examples/langchain_toolcall.py

Lines changed: 5 additions & 4 deletions
@@ -19,6 +19,7 @@
 tools = [search]
 
 lai_client = LiteralClient()
+lai_client.initialize()
 lai_prompt = lai_client.api.get_or_create_prompt(
     name="LC Agent",
     settings={
@@ -37,13 +38,13 @@
         {"role": "assistant", "content": "{{agent_scratchpad}}"},
     ],
 )
-prompt = lai_prompt.to_langchain_chat_prompt_template()
+prompt = lai_prompt.to_langchain_chat_prompt_template(
+    additional_messages=[("placeholder", "{agent_scratchpad}")],
+)
 
 agent: BaseSingleActionAgent = create_tool_calling_agent(model, tools, prompt)  # type: ignore
 agent_executor = AgentExecutor(agent=agent, tools=tools)
 
-cb = lai_client.langchain_callback()
-
 # Replace with ainvoke for asynchronous execution.
 agent_executor.invoke(
     {
@@ -56,5 +57,5 @@
         ],
         "input": "whats the weather in sf?",
     },
-    config=RunnableConfig(callbacks=[cb], run_name="Weather SF"),
+    config=RunnableConfig(run_name="Weather SF"),
 )
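
The substantive change: instead of constructing a langchain_callback() and threading it through RunnableConfig, the example now calls lai_client.initialize() once and lets the OpenTelemetry exporter added in literalai/client.py (below) capture the agent's LLM calls. Reduced to a skeleton, and assuming LITERAL_API_KEY plus the model provider's key are set in the environment:

from literalai import LiteralClient

lai_client = LiteralClient()
lai_client.initialize()  # one-time setup; replaces cb = lai_client.langchain_callback()

# ...build the prompt, agent, and executor exactly as above; invocations are
# now traced through OpenTelemetry spans rather than an explicit callback.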

examples/langchain_variable.py

Lines changed: 5 additions & 3 deletions
@@ -1,12 +1,13 @@
 from langchain.chat_models import init_chat_model
 from literalai import LiteralClient
-from langchain.schema.runnable.config import RunnableConfig
+
 
 from dotenv import load_dotenv
 
 load_dotenv()
 
 lai = LiteralClient()
+lai.initialize()
 
 prompt = lai.api.get_or_create_prompt(
     name="user intent",
@@ -29,13 +30,14 @@
 input_messages = messages.format_messages(
     user_message="The screen is cracked, there are scratches on the surface, and a component is missing."
 )
-cb = lai.langchain_callback()
 
 # Returns a langchain_openai.ChatOpenAI instance.
 gpt_4o = init_chat_model(  # type: ignore
     model_provider=prompt.provider,
     **prompt.settings,
 )
-print(gpt_4o.invoke(input_messages, config=RunnableConfig(callbacks=[cb])))
+
+lai.set_properties(prompt=prompt)
+print(gpt_4o.invoke(input_messages))
 
 lai.flush_and_stop()
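
Here set_properties(prompt=prompt) replaces the callback as the way to tie the upcoming generation to a Literal AI prompt version. Going by the set_properties signature added in literalai/client.py below, the same call can also carry a name, tags, and metadata; the values in this sketch are hypothetical:

lai.set_properties(
    name="user intent",                   # hypothetical display name
    tags=["langchain", "example"],        # hypothetical tags
    metadata={"source": "docs-example"},  # hypothetical metadata
    prompt=prompt,                        # links the traced spans to this prompt version
)
print(gpt_4o.invoke(input_messages))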

examples/llamaindex_workflow.py

Lines changed: 55 additions & 0 deletions
@@ -0,0 +1,55 @@
+import asyncio
+from llama_index.core.workflow import (
+    Event,
+    StartEvent,
+    StopEvent,
+    Workflow,
+    step,
+)
+from llama_index.llms.openai import OpenAI
+from literalai.client import LiteralClient
+
+lai_client = LiteralClient()
+lai_client.initialize()
+
+
+class JokeEvent(Event):
+    joke: str
+
+class RewriteJoke(Event):
+    joke: str
+
+
+class JokeFlow(Workflow):
+    llm = OpenAI()
+
+    @step()
+    async def generate_joke(self, ev: StartEvent) -> JokeEvent:
+        topic = ev.topic
+
+        prompt = f"Write your best joke about {topic}."
+        response = await self.llm.acomplete(prompt)
+        return JokeEvent(joke=str(response))
+
+    @step()
+    async def return_joke(self, ev: JokeEvent) -> RewriteJoke:
+        return RewriteJoke(joke=ev.joke + "What is funny?")
+
+    @step()
+    async def critique_joke(self, ev: RewriteJoke) -> StopEvent:
+        joke = ev.joke
+
+        prompt = f"Give a thorough analysis and critique of the following joke: {joke}"
+        response = await self.llm.acomplete(prompt)
+        return StopEvent(result=str(response))
+
+
+@lai_client.thread(name="JokeFlow")
+async def main():
+    w = JokeFlow(timeout=60, verbose=False)
+    result = await w.run(topic="pirates")
+    print(str(result))
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
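
One detail worth noting in this new example: @lai_client.thread(name="JokeFlow") groups every step and LLM call made inside main() under a single Literal AI thread. The context-manager form used in examples/multimodal.py below should be equivalent; a sketch:

async def main():
    with lai_client.thread(name="JokeFlow"):
        w = JokeFlow(timeout=60, verbose=False)
        print(str(await w.run(topic="pirates")))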

examples/multimodal.py

Lines changed: 83 additions & 0 deletions
@@ -0,0 +1,83 @@
+import base64
+import requests  # type: ignore
+import time
+
+from literalai import LiteralClient
+from openai import OpenAI
+
+from dotenv import load_dotenv
+
+
+load_dotenv()
+
+openai_client = OpenAI()
+
+literalai_client = LiteralClient()
+literalai_client.initialize()
+
+
+def encode_image(url):
+    return base64.b64encode(requests.get(url).content)
+
+
+@literalai_client.step(type="run")
+def generate_answer(user_query, image_url):
+    literalai_client.set_properties(
+        name="foobar",
+        metadata={"foo": "bar"},
+        tags=["foo", "bar"],
+    )
+    completion = openai_client.chat.completions.create(
+        model="gpt-4o-mini",
+        messages=[
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": user_query},
+                    {
+                        "type": "image_url",
+                        "image_url": {"url": image_url},
+                    },
+                ],
+            },
+        ],
+        max_tokens=300,
+    )
+    return completion.choices[0].message.content
+
+
+def main():
+    with literalai_client.thread(name="Meal Analyzer") as thread:
+        welcome_message = (
+            "Welcome to the meal analyzer, please upload an image of your plate!"
+        )
+        literalai_client.message(
+            content=welcome_message, type="assistant_message", name="My Assistant"
+        )
+
+        user_query = "Is this a healthy meal?"
+        user_image = "https://www.eatthis.com/wp-content/uploads/sites/4/2021/05/healthy-plate.jpg"
+        user_step = literalai_client.message(
+            content=user_query, type="user_message", name="User"
+        )
+
+        time.sleep(1)  # to make sure the user step has arrived at Literal AI
+
+        literalai_client.api.create_attachment(
+            thread_id=thread.id,
+            step_id=user_step.id,
+            name="meal_image",
+            content=encode_image(user_image),
+        )
+
+        answer = generate_answer(user_query=user_query, image_url=user_image)
+        literalai_client.message(
+            content=answer, type="assistant_message", name="My Assistant"
+        )
+
+
+main()
+# Network requests by the SDK are performed asynchronously.
+# Invoke flush_and_stop() to guarantee the completion of all requests prior to the process termination.
+# WARNING: If you run a continuous server, you should not use this method.
+literalai_client.flush_and_stop()
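
As the closing comments note, the SDK sends events in the background, so a short-lived script must flush before exiting, while a long-running server should skip flush_and_stop() entirely. A defensive variant (our sketch, not part of this commit) keeps the flush on the error path as well:

if __name__ == "__main__":
    try:
        main()
    finally:
        # Ensure buffered events are sent even if main() raises.
        literalai_client.flush_and_stop()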

examples/streaming.py

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@
 
 
 sdk = LiteralClient(batch_size=2)
-sdk.instrument_openai()
+sdk.initialize()
 
 
 @sdk.thread

literalai/client.py

Lines changed: 40 additions & 0 deletions
@@ -1,5 +1,10 @@
+import json
 import os
+from traceloop.sdk import Traceloop
 from typing import Any, Dict, List, Optional, Union
+from typing_extensions import deprecated
+import io
+from contextlib import redirect_stdout
 
 from literalai.api import AsyncLiteralAPI, LiteralAPI
 from literalai.callback.langchain_callback import get_langchain_callback
@@ -10,6 +15,7 @@
     experiment_item_run_decorator,
 )
 from literalai.event_processor import EventProcessor
+from literalai.exporter import LoggingSpanExporter
 from literalai.instrumentation.mistralai import instrument_mistralai
 from literalai.instrumentation.openai import instrument_openai
 from literalai.my_types import Environment
@@ -23,6 +29,7 @@
     step_decorator,
 )
 from literalai.observability.thread import ThreadContextManager, thread_decorator
+from literalai.prompt_engineering.prompt import Prompt
 from literalai.requirements import check_all_requirements
 
 
@@ -92,18 +99,21 @@ def to_sync(self) -> "LiteralClient":
         else:
             return self  # type: ignore
 
+    @deprecated("Use Literal.initialize instead")
     def instrument_openai(self):
         """
         Instruments the OpenAI SDK so that all LLM calls are logged to Literal AI.
         """
         instrument_openai(self.to_sync())
 
+    @deprecated("Use Literal.initialize instead")
     def instrument_mistralai(self):
         """
         Instruments the Mistral AI SDK so that all LLM calls are logged to Literal AI.
         """
         instrument_mistralai(self.to_sync())
 
+    @deprecated("Use Literal.initialize instead")
     def instrument_llamaindex(self):
         """
         Instruments the Llama Index framework so that all RAG & LLM calls are logged to Literal AI.
@@ -119,6 +129,13 @@ def instrument_llamaindex(self):
 
         instrument_llamaindex(self.to_sync())
 
+    def initialize(self):
+        with redirect_stdout(io.StringIO()):
+            Traceloop.init(
+                disable_batch=True,
+                exporter=LoggingSpanExporter(event_processor=self.event_processor),
+            )
+
     def langchain_callback(
         self,
         to_ignore: Optional[List[str]] = None,
@@ -352,6 +369,29 @@ def get_current_root_run(self):
         """
         return active_root_run_var.get()
 
+    def set_properties(
+        self,
+        name: Optional[str] = None,
+        tags: Optional[List[str]] = None,
+        metadata: Optional[Dict[str, Any]] = None,
+        prompt: Optional[Prompt] = None,
+    ):
+        thread = active_thread_var.get()
+        root_run = active_root_run_var.get()
+        parent = active_steps_var.get()[-1] if active_steps_var.get() else None
+
+        Traceloop.set_association_properties(
+            {
+                "literal.thread_id": str(thread.id) if thread else "None",
+                "literal.parent_id": str(parent.id) if parent else "None",
+                "literal.root_run_id": str(root_run.id) if root_run else "None",
+                "literal.name": str(name) if name else "None",
+                "literal.tags": json.dumps(tags) if tags else "None",
+                "literal.metadata": json.dumps(metadata) if metadata else "None",
+                "literal.prompt": json.dumps(prompt.to_dict()) if prompt else "None",
+            }
+        )
+
     def reset_context(self):
         """
         Resets the context, forgetting active steps & setting current thread to None.
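
Taken together, these two methods are the commit's new public surface: initialize() starts Traceloop (with its stdout banner suppressed) and installs a LoggingSpanExporter that feeds spans into the client's existing EventProcessor, while set_properties() stamps subsequent spans with the active thread, parent step, root run, and any caller-supplied name, tags, metadata, or prompt. A minimal end-to-end sketch; the model name and message are illustrative only:

from literalai import LiteralClient
from openai import OpenAI

lai = LiteralClient()  # expects LITERAL_API_KEY in the environment
lai.initialize()       # one-time OpenTelemetry setup; supersedes instrument_openai()

openai_client = OpenAI()

with lai.thread(name="Demo"):
    # Association properties apply to the spans emitted by the call below.
    lai.set_properties(name="demo-call", tags=["example"], metadata={"k": "v"})
    openai_client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Say hello."}],
    )

lai.flush_and_stop()  # flush buffered events before the process exits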
