Skip to content

Commit cca571a

Browse files
Liudmila Molkova and xrmx authored
Record content events regardless of span sampling decision (#3226)
* Record content events regardless of span sampling decision
* changelog
* feedback
* ruff

---------

Co-authored-by: Riccardo Magliocchetti <[email protected]>
1 parent 6b3a11b commit cca571a

File tree

7 files changed

+421
-49
lines changed

7 files changed

+421
-49
lines changed

instrumentation-genai/opentelemetry-instrumentation-openai-v2/CHANGELOG.md

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
77

88
## Unreleased
99

10+
- Record prompt and completion events regardless of span sampling decision.
11+
([#3226](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3226))
12+
1013
## Version 2.1b0 (2025-01-18)
1114

1215
- Coerce openai response_format to semconv format

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/patch.py

Lines changed: 40 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -56,11 +56,8 @@ def traced_method(wrapped, instance, args, kwargs):
5656
attributes=span_attributes,
5757
end_on_exit=False,
5858
) as span:
59-
if span.is_recording():
60-
for message in kwargs.get("messages", []):
61-
event_logger.emit(
62-
message_to_event(message, capture_content)
63-
)
59+
for message in kwargs.get("messages", []):
60+
event_logger.emit(message_to_event(message, capture_content))
6461

6562
start = default_timer()
6663
result = None
@@ -76,6 +73,9 @@ def traced_method(wrapped, instance, args, kwargs):
7673
_set_response_attributes(
7774
span, result, event_logger, capture_content
7875
)
76+
for choice in getattr(result, "choices", []):
77+
event_logger.emit(choice_to_event(choice, capture_content))
78+
7979
span.end()
8080
return result
8181

@@ -114,11 +114,8 @@ async def traced_method(wrapped, instance, args, kwargs):
114114
attributes=span_attributes,
115115
end_on_exit=False,
116116
) as span:
117-
if span.is_recording():
118-
for message in kwargs.get("messages", []):
119-
event_logger.emit(
120-
message_to_event(message, capture_content)
121-
)
117+
for message in kwargs.get("messages", []):
118+
event_logger.emit(message_to_event(message, capture_content))
122119

123120
start = default_timer()
124121
result = None
@@ -134,6 +131,9 @@ async def traced_method(wrapped, instance, args, kwargs):
134131
_set_response_attributes(
135132
span, result, event_logger, capture_content
136133
)
134+
for choice in getattr(result, "choices", []):
135+
event_logger.emit(choice_to_event(choice, capture_content))
136+
137137
span.end()
138138
return result
139139

@@ -228,12 +228,8 @@ def _set_response_attributes(
228228
)
229229

230230
if getattr(result, "choices", None):
231-
choices = result.choices
232-
for choice in choices:
233-
event_logger.emit(choice_to_event(choice, capture_content))
234-
235231
finish_reasons = []
236-
for choice in choices:
232+
for choice in result.choices:
237233
finish_reasons.append(choice.finish_reason or "error")
238234

239235
set_span_attribute(
@@ -333,42 +329,43 @@ def setup(self):
333329

334330
def cleanup(self):
335331
if self._span_started:
336-
if self.response_model:
332+
if self.span.is_recording():
333+
if self.response_model:
334+
set_span_attribute(
335+
self.span,
336+
GenAIAttributes.GEN_AI_RESPONSE_MODEL,
337+
self.response_model,
338+
)
339+
340+
if self.response_id:
341+
set_span_attribute(
342+
self.span,
343+
GenAIAttributes.GEN_AI_RESPONSE_ID,
344+
self.response_id,
345+
)
346+
337347
set_span_attribute(
338348
self.span,
339-
GenAIAttributes.GEN_AI_RESPONSE_MODEL,
340-
self.response_model,
349+
GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS,
350+
self.prompt_tokens,
341351
)
342-
343-
if self.response_id:
344352
set_span_attribute(
345353
self.span,
346-
GenAIAttributes.GEN_AI_RESPONSE_ID,
347-
self.response_id,
354+
GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
355+
self.completion_tokens,
348356
)
349357

350-
set_span_attribute(
351-
self.span,
352-
GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS,
353-
self.prompt_tokens,
354-
)
355-
set_span_attribute(
356-
self.span,
357-
GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
358-
self.completion_tokens,
359-
)
360-
361-
set_span_attribute(
362-
self.span,
363-
GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
364-
self.service_tier,
365-
)
358+
set_span_attribute(
359+
self.span,
360+
GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
361+
self.service_tier,
362+
)
366363

367-
set_span_attribute(
368-
self.span,
369-
GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS,
370-
self.finish_reasons,
371-
)
364+
set_span_attribute(
365+
self.span,
366+
GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS,
367+
self.finish_reasons,
368+
)
372369

373370
for idx, choice in enumerate(self.choice_buffers):
374371
message = {"role": "assistant"}
Lines changed: 117 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,117 @@
1+
interactions:
2+
- request:
3+
body: |-
4+
{
5+
"messages": [
6+
{
7+
"role": "user",
8+
"content": "Say this is a test"
9+
}
10+
],
11+
"model": "gpt-4",
12+
"stream": true,
13+
"stream_options": {
14+
"include_usage": true
15+
}
16+
}
17+
headers:
18+
accept:
19+
- application/json
20+
accept-encoding:
21+
- gzip, deflate
22+
authorization:
23+
- Bearer test_openai_api_key
24+
connection:
25+
- keep-alive
26+
content-length:
27+
- '142'
28+
content-type:
29+
- application/json
30+
host:
31+
- api.openai.com
32+
user-agent:
33+
- AsyncOpenAI/Python 1.26.0
34+
x-stainless-arch:
35+
- arm64
36+
x-stainless-async:
37+
- async:asyncio
38+
x-stainless-lang:
39+
- python
40+
x-stainless-os:
41+
- MacOS
42+
x-stainless-package-version:
43+
- 1.26.0
44+
x-stainless-runtime:
45+
- CPython
46+
x-stainless-runtime-version:
47+
- 3.12.5
48+
method: POST
49+
uri: https://api.openai.com/v1/chat/completions
50+
response:
51+
body:
52+
string: |+
53+
data: {"id":"chatcmpl-ASv9ejXDUtAhGOJJxWuw026zdinc4","object":"chat.completion.chunk","created":1731456250,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null}
54+
55+
data: {"id":"chatcmpl-ASv9ejXDUtAhGOJJxWuw026zdinc4","object":"chat.completion.chunk","created":1731456250,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"This"},"logprobs":null,"finish_reason":null}],"usage":null}
56+
57+
data: {"id":"chatcmpl-ASv9ejXDUtAhGOJJxWuw026zdinc4","object":"chat.completion.chunk","created":1731456250,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" is"},"logprobs":null,"finish_reason":null}],"usage":null}
58+
59+
data: {"id":"chatcmpl-ASv9ejXDUtAhGOJJxWuw026zdinc4","object":"chat.completion.chunk","created":1731456250,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}],"usage":null}
60+
61+
data: {"id":"chatcmpl-ASv9ejXDUtAhGOJJxWuw026zdinc4","object":"chat.completion.chunk","created":1731456250,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" test"},"logprobs":null,"finish_reason":null}],"usage":null}
62+
63+
data: {"id":"chatcmpl-ASv9ejXDUtAhGOJJxWuw026zdinc4","object":"chat.completion.chunk","created":1731456250,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}],"usage":null}
64+
65+
data: {"id":"chatcmpl-ASv9ejXDUtAhGOJJxWuw026zdinc4","object":"chat.completion.chunk","created":1731456250,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null}
66+
67+
data: {"id":"chatcmpl-ASv9ejXDUtAhGOJJxWuw026zdinc4","object":"chat.completion.chunk","created":1731456250,"model":"gpt-4-0613","system_fingerprint":null,"choices":[],"usage":{"prompt_tokens":12,"completion_tokens":5,"total_tokens":17,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}}}
68+
69+
data: [DONE]
70+
71+
headers:
72+
CF-Cache-Status:
73+
- DYNAMIC
74+
CF-RAY:
75+
- 8e1a80bd2f31e1e5-MRS
76+
Connection:
77+
- keep-alive
78+
Content-Type:
79+
- text/event-stream; charset=utf-8
80+
Date:
81+
- Wed, 13 Nov 2024 00:04:11 GMT
82+
Server:
83+
- cloudflare
84+
Set-Cookie: test_set_cookie
85+
Transfer-Encoding:
86+
- chunked
87+
X-Content-Type-Options:
88+
- nosniff
89+
access-control-expose-headers:
90+
- X-Request-ID
91+
alt-svc:
92+
- h3=":443"; ma=86400
93+
openai-organization: test_openai_org_id
94+
openai-processing-ms:
95+
- '196'
96+
openai-version:
97+
- '2020-10-01'
98+
strict-transport-security:
99+
- max-age=31536000; includeSubDomains; preload
100+
x-ratelimit-limit-requests:
101+
- '10000'
102+
x-ratelimit-limit-tokens:
103+
- '1000000'
104+
x-ratelimit-remaining-requests:
105+
- '9999'
106+
x-ratelimit-remaining-tokens:
107+
- '999977'
108+
x-ratelimit-reset-requests:
109+
- 6ms
110+
x-ratelimit-reset-tokens:
111+
- 1ms
112+
x-request-id:
113+
- req_cc9204ae23338b130df11c8c5b5f31af
114+
status:
115+
code: 200
116+
message: OK
117+
version: 1
Lines changed: 134 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,134 @@
1+
interactions:
2+
- request:
3+
body: |-
4+
{
5+
"messages": [
6+
{
7+
"role": "user",
8+
"content": "Say this is a test"
9+
}
10+
],
11+
"model": "gpt-4o-mini",
12+
"stream": false
13+
}
14+
headers:
15+
accept:
16+
- application/json
17+
accept-encoding:
18+
- gzip, deflate
19+
authorization:
20+
- Bearer test_openai_api_key
21+
connection:
22+
- keep-alive
23+
content-length:
24+
- '106'
25+
content-type:
26+
- application/json
27+
host:
28+
- api.openai.com
29+
user-agent:
30+
- OpenAI/Python 1.54.3
31+
x-stainless-arch:
32+
- arm64
33+
x-stainless-async:
34+
- 'false'
35+
x-stainless-lang:
36+
- python
37+
x-stainless-os:
38+
- MacOS
39+
x-stainless-package-version:
40+
- 1.54.3
41+
x-stainless-retry-count:
42+
- '0'
43+
x-stainless-runtime:
44+
- CPython
45+
x-stainless-runtime-version:
46+
- 3.12.6
47+
method: POST
48+
uri: https://api.openai.com/v1/chat/completions
49+
response:
50+
body:
51+
string: |-
52+
{
53+
"id": "chatcmpl-ASYMQRl3A3DXL9FWCK9tnGRcKIO7q",
54+
"object": "chat.completion",
55+
"created": 1731368630,
56+
"model": "gpt-4o-mini-2024-07-18",
57+
"choices": [
58+
{
59+
"index": 0,
60+
"message": {
61+
"role": "assistant",
62+
"content": "This is a test.",
63+
"refusal": null
64+
},
65+
"logprobs": null,
66+
"finish_reason": "stop"
67+
}
68+
],
69+
"usage": {
70+
"prompt_tokens": 12,
71+
"completion_tokens": 5,
72+
"total_tokens": 17,
73+
"prompt_tokens_details": {
74+
"cached_tokens": 0,
75+
"audio_tokens": 0
76+
},
77+
"completion_tokens_details": {
78+
"reasoning_tokens": 0,
79+
"audio_tokens": 0,
80+
"accepted_prediction_tokens": 0,
81+
"rejected_prediction_tokens": 0
82+
}
83+
},
84+
"system_fingerprint": "fp_0ba0d124f1"
85+
}
86+
headers:
87+
CF-Cache-Status:
88+
- DYNAMIC
89+
CF-RAY:
90+
- 8e122593ff368bc8-SIN
91+
Connection:
92+
- keep-alive
93+
Content-Type:
94+
- application/json
95+
Date:
96+
- Mon, 11 Nov 2024 23:43:50 GMT
97+
Server:
98+
- cloudflare
99+
Set-Cookie: test_set_cookie
100+
Transfer-Encoding:
101+
- chunked
102+
X-Content-Type-Options:
103+
- nosniff
104+
access-control-expose-headers:
105+
- X-Request-ID
106+
alt-svc:
107+
- h3=":443"; ma=86400
108+
content-length:
109+
- '765'
110+
openai-organization: test_openai_org_id
111+
openai-processing-ms:
112+
- '287'
113+
openai-version:
114+
- '2020-10-01'
115+
strict-transport-security:
116+
- max-age=31536000; includeSubDomains; preload
117+
x-ratelimit-limit-requests:
118+
- '10000'
119+
x-ratelimit-limit-tokens:
120+
- '200000'
121+
x-ratelimit-remaining-requests:
122+
- '9999'
123+
x-ratelimit-remaining-tokens:
124+
- '199977'
125+
x-ratelimit-reset-requests:
126+
- 8.64s
127+
x-ratelimit-reset-tokens:
128+
- 6ms
129+
x-request-id:
130+
- req_58cff97afd0e7c0bba910ccf0b044a6f
131+
status:
132+
code: 200
133+
message: OK
134+
version: 1

0 commit comments

Comments (0)