
Commit b63e912

refactor: address missing issues

Parent commit: 95cdb03

File tree: 7 files changed, +151 -25 lines

examples/langchain_toolcall.py
Lines changed: 5 additions & 4 deletions

@@ -19,6 +19,7 @@
 tools = [search]

 lai_client = LiteralClient()
+lai_client.initialize()
 lai_prompt = lai_client.api.get_or_create_prompt(
     name="LC Agent",
     settings={
@@ -37,13 +38,13 @@
         {"role": "assistant", "content": "{{agent_scratchpad}}"},
     ],
 )
-prompt = lai_prompt.to_langchain_chat_prompt_template()
+prompt = lai_prompt.to_langchain_chat_prompt_template(
+    additional_messages=[("placeholder", "{agent_scratchpad}")],
+)

 agent: BaseSingleActionAgent = create_tool_calling_agent(model, tools, prompt)  # type: ignore
 agent_executor = AgentExecutor(agent=agent, tools=tools)

-cb = lai_client.langchain_callback()
-
 # Replace with ainvoke for asynchronous execution.
 agent_executor.invoke(
     {
@@ -56,5 +57,5 @@
         ],
         "input": "whats the weather in sf?",
     },
-    config=RunnableConfig(callbacks=[cb], run_name="Weather SF"),
+    config=RunnableConfig(run_name="Weather SF"),
 )
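
The `additional_messages` argument is the functional fix here: the Literal AI template stores the agent scratchpad as a literal assistant message ("{{agent_scratchpad}}"), while LangChain's tool-calling agent needs an actual placeholder slot to inject intermediate steps into. A minimal sketch of the shape the converted template takes (the system and human messages are illustrative, not from the example):

    from langchain_core.prompts import ChatPromptTemplate

    # The ("placeholder", ...) tuple becomes a MessagesPlaceholder that the
    # agent fills with its scratchpad at run time.
    chat_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful assistant."),  # illustrative
            ("human", "{input}"),  # illustrative
            ("placeholder", "{agent_scratchpad}"),  # added via additional_messages
        ]
    )

The exporter change later in this commit skips messages whose role is "placeholder", which keeps this slot out of the logged generations.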

examples/langchain_variable.py
Lines changed: 5 additions & 3 deletions

@@ -1,12 +1,13 @@
 from langchain.chat_models import init_chat_model
 from literalai import LiteralClient
-from langchain.schema.runnable.config import RunnableConfig
+

 from dotenv import load_dotenv

 load_dotenv()

 lai = LiteralClient()
+lai.initialize()

 prompt = lai.api.get_or_create_prompt(
     name="user intent",
@@ -29,13 +30,14 @@
 input_messages = messages.format_messages(
     user_message="The screen is cracked, there are scratches on the surface, and a component is missing."
 )
-cb = lai.langchain_callback()

 # Returns a langchain_openai.ChatOpenAI instance.
 gpt_4o = init_chat_model(  # type: ignore
     model_provider=prompt.provider,
     **prompt.settings,
 )
-print(gpt_4o.invoke(input_messages, config=RunnableConfig(callbacks=[cb])))
+
+lai.set_properties(prompt=prompt)
+print(gpt_4o.invoke(input_messages))

 lai.flush_and_stop()

examples/multimodal.py
Lines changed: 84 additions & 0 deletions

@@ -0,0 +1,84 @@
+import base64
+import requests  # type: ignore
+import time
+
+from literalai import LiteralClient
+from openai import OpenAI
+
+from dotenv import load_dotenv
+
+from literalai.observability.step import ScoreDict
+
+load_dotenv()
+
+openai_client = OpenAI()
+
+literalai_client = LiteralClient()
+literalai_client.initialize()
+
+
+def encode_image(url):
+    return base64.b64encode(requests.get(url).content)
+
+
+@literalai_client.step(type="run")
+def generate_answer(user_query, image_url):
+    literalai_client.set_properties(
+        name="foobar",
+        metadata={"foo": "bar"},
+        tags=["foo", "bar"],
+    )
+    completion = openai_client.chat.completions.create(
+        model="gpt-4o-mini",
+        messages=[
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": user_query},
+                    {
+                        "type": "image_url",
+                        "image_url": {"url": image_url},
+                    },
+                ],
+            },
+        ],
+        max_tokens=300,
+    )
+    return completion.choices[0].message.content
+
+
+def main():
+    with literalai_client.thread(name="Meal Analyzer") as thread:
+        welcome_message = (
+            "Welcome to the meal analyzer, please upload an image of your plate!"
+        )
+        literalai_client.message(
+            content=welcome_message, type="assistant_message", name="My Assistant"
+        )
+
+        user_query = "Is this a healthy meal?"
+        user_image = "https://www.eatthis.com/wp-content/uploads/sites/4/2021/05/healthy-plate.jpg"
+        user_step = literalai_client.message(
+            content=user_query, type="user_message", name="User"
+        )
+
+        time.sleep(1)  # to make sure the user step has arrived at Literal AI
+
+        literalai_client.api.create_attachment(
+            thread_id=thread.id,
+            step_id=user_step.id,
+            name="meal_image",
+            content=encode_image(user_image),
+        )
+
+        answer = generate_answer(user_query=user_query, image_url=user_image)
+        literalai_client.message(
+            content=answer, type="assistant_message", name="My Assistant"
+        )
+
+
+main()
+# Network requests by the SDK are performed asynchronously.
+# Invoke flush_and_stop() to guarantee the completion of all requests prior to the process termination.
+# WARNING: If you run a continuous server, you should not use this method.
+literalai_client.flush_and_stop()

examples/streaming.py
Lines changed: 1 addition & 1 deletion

@@ -12,7 +12,7 @@


 sdk = LiteralClient(batch_size=2)
-sdk.instrument_openai()
+sdk.initialize()


 @sdk.thread
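
Together with the other examples, this suggests initialize() is now the single setup entry point and subsumes the provider-specific instrument_openai(). A minimal sketch, assuming initialize() also covers the OpenAI instrumentation the old call enabled:

    from literalai import LiteralClient

    sdk = LiteralClient(batch_size=2)
    sdk.initialize()  # assumed to replace the former sdk.instrument_openai()

    # OpenAI calls made from here on are captured without extra wiring.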

literalai/client.py
Lines changed: 3 additions & 0 deletions

@@ -29,6 +29,7 @@
     step_decorator,
 )
 from literalai.observability.thread import ThreadContextManager, thread_decorator
+from literalai.prompt_engineering.prompt import Prompt
 from literalai.requirements import check_all_requirements


@@ -373,6 +374,7 @@ def set_properties(
         name: Optional[str] = None,
         tags: Optional[List[str]] = None,
         metadata: Optional[Dict[str, Any]] = None,
+        prompt: Optional[Prompt] = None,
     ):
         thread = active_thread_var.get()
         root_run = active_root_run_var.get()
@@ -386,6 +388,7 @@ def set_properties(
                 "literal.name": str(name) if name else "None",
                 "literal.tags": json.dumps(tags) if tags else "None",
                 "literal.metadata": json.dumps(metadata) if metadata else "None",
+                "literal.prompt": json.dumps(prompt.to_dict()) if prompt else "None",
             }
         )
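
A minimal sketch of the new prompt parameter in use, assuming a prompt registered as in the langchain_variable example (the prompt name and template are illustrative); set_properties serializes it with prompt.to_dict() into the "literal.prompt" association property, which the exporter (literalai/exporter.py below) reads back:

    from literalai import LiteralClient

    client = LiteralClient()
    client.initialize()

    # Illustrative prompt; assumes get_or_create_prompt accepts template_messages
    # as it does in the examples above.
    prompt = client.api.get_or_create_prompt(
        name="user intent",
        template_messages=[{"role": "user", "content": "{{user_message}}"}],
    )

    # Spans produced by subsequent LLM calls now carry the serialized prompt,
    # from which the exporter fills promptId and variables.
    client.set_properties(prompt=prompt)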

literalai/event_processor.py
Lines changed: 0 additions & 1 deletion

@@ -98,7 +98,6 @@ def _process_batch(self, batch: List):
         self.processing_counter -= len(batch)

     def flush_and_stop(self):
-        time.sleep(4)
         self.stop_event.set()
         if not self.disabled:
             self.processing_thread.join()
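
The fixed time.sleep(4) was redundant because flush_and_stop() already blocks on processing_thread.join() until pending batches are sent. A standalone sketch of why the join makes a fixed sleep unnecessary (illustrative names, not the SDK's internals):

    import queue
    import threading
    import time

    q: "queue.Queue[int]" = queue.Queue()
    stop = threading.Event()

    def worker() -> None:
        # Drain the queue; exit only once stopped AND empty.
        while not stop.is_set() or not q.empty():
            try:
                item = q.get(timeout=0.1)
            except queue.Empty:
                continue
            time.sleep(0.2)  # simulate sending a batch
            print("sent", item)

    t = threading.Thread(target=worker)
    t.start()
    for i in range(3):
        q.put(i)

    stop.set()
    t.join()  # blocks until every queued item is sent; no fixed sleep needed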

literalai/exporter.py
Lines changed: 53 additions & 16 deletions

@@ -1,5 +1,6 @@
-from datetime import datetime, timezone
+from datetime import date, datetime, timezone
 import json
+from annotated_types import Timezone
 from opentelemetry.sdk.trace import ReadableSpan
 from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
 from typing import Dict, List, Optional, Sequence, cast
@@ -10,6 +11,7 @@
 from literalai.helper import utc_now
 from literalai.observability.generation import GenerationType
 from literalai.observability.step import Step, StepDict
+from literalai.prompt_engineering.prompt import PromptDict


 class LoggingSpanExporter(SpanExporter):
@@ -56,14 +58,8 @@ def force_flush(self, timeout_millis: float = 30000) -> bool:
         """Force flush the exporter."""
         return True

-    # # TODO: Add generation promptid
-    # # TODO: Add generation variables
-    # # TODO: Check missing variables
-    # # TODO: ttFirstToken
-    # # TODO: duration
-    # # TODO: tokenThroughputInSeconds
-    # # TODO: Add tools
-    # # TODO: error check with gemini error
+    # TODO: error check with gemini error
+    # TODO: ttFirstToken
     def _create_step_from_span(self, span: ReadableSpan) -> Step:
         """Convert a span to a Step object"""
         attributes = span.attributes or {}
@@ -78,6 +74,11 @@ def _create_step_from_span(self, span: ReadableSpan) -> Step:
             if span.end_time
             else utc_now()
         )
+        duration, token_throughput = self._calculate_duration_and_throughput(
+            span.start_time,
+            span.end_time,
+            int(str(attributes.get("llm.usage.total_tokens", 0))),
+        )

         generation_type = attributes.get("llm.request.type")
         is_chat = generation_type == "chat"
@@ -103,19 +104,35 @@ def _create_step_from_span(self, span: ReadableSpan) -> Step:
             k: str(v) for k, v in span_props.items() if v is not None and v != "None"
         }

+        serialized_prompt = attributes.get(
+            "traceloop.association.properties.literal.prompt"
+        )
+        prompt = cast(
+            Optional[PromptDict],
+            (
+                self._extract_json(str(serialized_prompt))
+                if serialized_prompt and serialized_prompt != "None"
+                else None
+            ),
+        )
+
         generation_content = {
+            "duration": duration,
             "messages": (
-                self.extract_messages(cast(Dict, attributes)) if is_chat else None
+                self._extract_messages(cast(Dict, attributes)) if is_chat else None
             ),
             "message_completion": (
-                self.extract_messages(cast(Dict, attributes), "gen_ai.completion.")[0]
+                self._extract_messages(cast(Dict, attributes), "gen_ai.completion.")[0]
                 if is_chat
                 else None
             ),
             "prompt": attributes.get("gen_ai.prompt.0.user"),
+            "promptId": prompt.get("id") if prompt else None,
             "completion": attributes.get("gen_ai.completion.0.content"),
             "model": attributes.get("gen_ai.request.model"),
            "provider": attributes.get("gen_ai.system"),
+            "tokenThroughputInSeconds": token_throughput,
+            "variables": prompt.get("variables") if prompt else None,
         }
         generation_settings = {
             "max_tokens": attributes.get("gen_ai.request.max_tokens"),
@@ -133,13 +150,13 @@ def _create_step_from_span(self, span: ReadableSpan) -> Step:
             "id": str(span.context.span_id) if span.context else None,
             "name": span_props.get("name", span.name),
             "type": "llm",
-            "metadata": self.extract_json(span_props.get("metadata", "{}")),
+            "metadata": self._extract_json(span_props.get("metadata", "{}")),
             "startTime": start_time,
             "endTime": end_time,
             "threadId": span_props.get("thread_id"),
             "parentId": span_props.get("parent_id"),
             "rootRunId": span_props.get("root_run_id"),
-            "tags": self.extract_json(span_props.get("tags", "[]")),
+            "tags": self._extract_json(span_props.get("tags", "[]")),
             "input": {
                 "content": (
                     generation_content["messages"]
@@ -176,7 +193,7 @@ def _create_step_from_span(self, span: ReadableSpan) -> Step:

         return step

-    def extract_messages(
+    def _extract_messages(
         self, data: Dict, prefix: str = "gen_ai.prompt."
     ) -> List[Dict]:
         messages = []
@@ -188,22 +205,42 @@

             if role_key not in data or content_key not in data:
                 break
+            if data[role_key] == "placeholder":
+                break

             messages.append(
                 {
                     "role": data[role_key],
-                    "content": self.extract_json(data[content_key]),
+                    "content": self._extract_json(data[content_key]),
                 }
             )

             index += 1

         return messages

-    def extract_json(self, data: str) -> Dict | List | str:
+    def _extract_json(self, data: str) -> Dict | List | str:
         try:
             content = json.loads(data)
         except Exception:
             content = data

         return content
+
+    def _calculate_duration_and_throughput(
+        self,
+        start_time_ns: Optional[int],
+        end_time_ns: Optional[int],
+        total_tokens: Optional[int],
+    ) -> tuple[float, Optional[float]]:
+        """Calculate duration in seconds and token throughput per second."""
+        duration_ns = (
+            end_time_ns - start_time_ns if start_time_ns and end_time_ns else 0
+        )
+        duration_seconds = duration_ns / 1e9
+
+        token_throughput = None
+        if total_tokens is not None and duration_seconds > 0:
+            token_throughput = total_tokens / duration_seconds
+
+        return duration_seconds, token_throughput
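
A quick standalone check of the new helper's arithmetic, with illustrative values (span timestamps are in nanoseconds):

    # A 2-second span that produced 100 tokens:
    start_ns, end_ns, total_tokens = 1_000_000_000, 3_000_000_000, 100

    duration_seconds = (end_ns - start_ns) / 1e9        # 2.0 s
    token_throughput = total_tokens / duration_seconds  # 50.0 tokens/s

    assert (duration_seconds, token_throughput) == (2.0, 50.0)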
