-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathcontext_manager_trace_llm.py
More file actions
70 lines (59 loc) · 2.15 KB
/
context_manager_trace_llm.py
File metadata and controls
70 lines (59 loc) · 2.15 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
"""
Langfuse API Level 3: Context Manager (Langfuse v3)

This approach uses context managers to explicitly define trace hierarchy.
You have fine-grained control over spans and generations.

The script asks an LLM to evaluate an arithmetic expression while recording
a trace of shape:
    calculator_context_manager (span)
      ├── input_validation (span)
      └── llm_calculation (generation)
"""
from langfuse import Langfuse
from openai import OpenAI
from dotenv import load_dotenv

load_dotenv()

client = OpenAI()
langfuse = Langfuse()

expression = "123 + 456 * 2"

# Single source of truth for the model name and prompt. Previously both were
# duplicated verbatim between the generation's logged `input` and the actual
# chat.completions call, so the logged trace could silently drift from what
# was really sent.
MODEL = "gpt-4o-mini"
messages = [
    {
        "role": "system",
        "content": "You are a very accurate calculator. You output only the result of the calculation.",
    },
    {"role": "user", "content": expression},
]

# Create a trace and use context managers for spans/generations.
# The `as ...` bindings were dropped: every update below goes through the
# langfuse.update_current_* helpers, which act on the active context, so the
# span/generation objects themselves were never used.
with langfuse.start_as_current_span(name="calculator_context_manager"):
    # Add a span for preprocessing
    with langfuse.start_as_current_span(name="input_validation"):
        langfuse.update_current_span(
            input={"expression": expression},
            output={"status": "valid"},
        )

    # Add a generation for the LLM call
    with langfuse.start_as_current_generation(
        name="llm_calculation",
        model=MODEL,
        input=messages,
    ):
        completion = client.chat.completions.create(
            model=MODEL,
            messages=messages,
        )
        result = completion.choices[0].message.content
        # Record output, token usage, and metadata on the active generation.
        langfuse.update_current_generation(
            output=result,
            usage_details={
                "input": completion.usage.prompt_tokens,
                "output": completion.usage.completion_tokens,
            },
            metadata={"project": "context_manager_example"},
        )

    # Update trace with final output
    langfuse.update_current_trace(
        output={"result": result},
        tags=["calculator", "context_manager"],
    )

print(f"{expression} = {result}")

# Ensure data is sent to Langfuse before script exits
langfuse.flush()