Commit cf784d8

fix(sdk): manual logging example + fix span ended error (#3352)
1 parent b575c7d commit cf784d8

File tree

2 files changed: +134 -19 lines changed

Lines changed: 109 additions & 0 deletions
@@ -0,0 +1,109 @@
"""
Manual logging example using OpenLLMetry tracing.

This example demonstrates how to manually report LLM calls using the
track_llm_call context manager from traceloop.sdk.tracing.manual.
"""

import os
import openai
from dotenv import load_dotenv
from traceloop.sdk import Traceloop
from traceloop.sdk.tracing.manual import LLMMessage, LLMUsage, track_llm_call


def main():
    # Load environment variables
    load_dotenv()

    # Initialize Traceloop
    Traceloop.init(
        app_name="manual-logging-example",
        disable_batch=True,
        instruments={}
    )

    # Initialize OpenAI client
    openai_client = openai.OpenAI(
        api_key=os.getenv("OPENAI_API_KEY")
    )

    print("Starting manual logging example...")

    # Example 1: Basic LLM call with manual reporting
    print("\n=== Example 1: Basic LLM Call ===")
    with track_llm_call(vendor="openai", type="chat") as span:
        # Report the request
        messages = [
            LLMMessage(role="user", content="Tell me a joke about opentelemetry")
        ]
        span.report_request(
            model="gpt-4o",
            messages=messages,
        )

        # Make the actual API call
        response = openai_client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {"role": "user", "content": "Tell me a joke about opentelemetry"}
            ],
        )

        # Report the response
        response_messages = [choice.message.content for choice in response.choices]
        span.report_response(response.model, response_messages)

        # Report usage metrics
        if response.usage:
            span.report_usage(
                LLMUsage(
                    prompt_tokens=response.usage.prompt_tokens,
                    completion_tokens=response.usage.completion_tokens,
                    total_tokens=response.usage.total_tokens,
                )
            )

        print(f"Response: {response_messages[0]}")

    # Example 2: Second independent LLM call
    print("\n=== Example 2: Second LLM Call ===")
    with track_llm_call(vendor="openai", type="chat") as span:
        # Report the request
        messages = [
            LLMMessage(role="user", content="What is machine learning?")
        ]
        span.report_request(
            model="gpt-4o",
            messages=messages,
        )

        # Make the actual API call
        response = openai_client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {"role": "user", "content": "What is machine learning?"}
            ],
        )

        # Report the response
        response_messages = [choice.message.content for choice in response.choices]
        span.report_response(response.model, response_messages)

        # Report usage metrics
        if response.usage:
            span.report_usage(
                LLMUsage(
                    prompt_tokens=response.usage.prompt_tokens,
                    completion_tokens=response.usage.completion_tokens,
                    total_tokens=response.usage.total_tokens,
                )
            )

        print(f"Response: {response_messages[0]}")

    print("\nManual logging example completed!")


if __name__ == "__main__":
    main()
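
One detail worth noting in the example: Traceloop.init is called with instruments={}, which, assuming the SDK's usual semantics for the instruments parameter, disables all automatic instrumentation so that only the spans reported manually through track_llm_call are exported; otherwise the OpenAI auto-instrumentation would also emit a span for each chat.completions.create call and every request would appear twice. A minimal sketch of the opposite configuration, assuming the SDK's Instruments enum, for when automatic spans are wanted instead:

# Hypothetical variant of the init call above: enable only the OpenAI
# auto-instrumentation and skip manual reporting entirely
# (assumes traceloop.sdk.instruments.Instruments and its OPENAI member).
from traceloop.sdk import Traceloop
from traceloop.sdk.instruments import Instruments

Traceloop.init(
    app_name="auto-logging-example",
    disable_batch=True,
    instruments={Instruments.OPENAI},
)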

packages/traceloop-sdk/traceloop/sdk/tracing/manual.py

Lines changed: 25 additions & 19 deletions
@@ -1,7 +1,9 @@
 from contextlib import contextmanager
+from opentelemetry import context
 from opentelemetry.semconv_ai import SpanAttributes
-from opentelemetry.trace import Span
+from opentelemetry.trace import Span, set_span_in_context
 from pydantic import BaseModel
+from typing import Optional
 from traceloop.sdk.tracing.context_manager import get_tracer


@@ -14,8 +16,8 @@ class LLMUsage(BaseModel):
     prompt_tokens: int
     completion_tokens: int
     total_tokens: int
-    cache_creation_input_tokens: int
-    cache_read_input_tokens: int
+    cache_creation_input_tokens: Optional[int] = None
+    cache_read_input_tokens: Optional[int] = None


 class LLMSpan:
@@ -55,24 +57,28 @@ def report_usage(self, usage: LLMUsage):
         self._span.set_attribute(
             SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.total_tokens
         )
-        self._span.set_attribute(
-            SpanAttributes.LLM_USAGE_CACHE_CREATION_INPUT_TOKENS,
-            usage.cache_creation_input_tokens,
-        )
-        self._span.set_attribute(
-            SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS,
-            usage.cache_read_input_tokens,
-        )
+        if usage.cache_creation_input_tokens is not None:
+            self._span.set_attribute(
+                SpanAttributes.LLM_USAGE_CACHE_CREATION_INPUT_TOKENS,
+                usage.cache_creation_input_tokens,
+            )
+        if usage.cache_read_input_tokens is not None:
+            self._span.set_attribute(
+                SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS,
+                usage.cache_read_input_tokens,
+            )


 @contextmanager
 def track_llm_call(vendor: str, type: str):
     with get_tracer() as tracer:
-        with tracer.start_as_current_span(name=f"{vendor}.{type}") as span:
-            span.set_attribute(SpanAttributes.LLM_SYSTEM, vendor)
-            span.set_attribute(SpanAttributes.LLM_REQUEST_TYPE, type)
-            llm_span = LLMSpan(span)
-            try:
-                yield llm_span
-            finally:
-                span.end()
+        span = tracer.start_span(name=f"{vendor}.{type}")
+        span.set_attribute(SpanAttributes.LLM_SYSTEM, vendor)
+        span.set_attribute(SpanAttributes.LLM_REQUEST_TYPE, type)
+        ctx = set_span_in_context(span)
+        token = context.attach(ctx)
+        try:
+            yield LLMSpan(span)
+        finally:
+            context.detach(token)
+            span.end()
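
The track_llm_call rewrite is the "span ended" fix from the commit title. Previously, tracer.start_as_current_span was used as a context manager, which in OpenTelemetry ends the span on exit by default (end_on_exit=True), so the explicit span.end() in the finally block ended the span a second time. The new code starts the span manually, attaches it to the current context (so any child spans still nest under it), and ends it exactly once. A minimal sketch of an alternative fix, not the one taken in this commit, would be to keep start_as_current_span but disable its implicit end:

# Alternative sketch (not this commit's approach): keep start_as_current_span
# for context propagation, but pass end_on_exit=False so the explicit
# span.end() in the finally block is the only call that ends the span.
from contextlib import contextmanager
from opentelemetry import trace

tracer = trace.get_tracer(__name__)

@contextmanager
def track_call(name: str):
    with tracer.start_as_current_span(name, end_on_exit=False) as span:
        try:
            yield span
        finally:
            span.end()  # ends the span exactly once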
