
Commit a012d67

punitmahes (Punit Maheshwari) authored and committed
fix(response): Add logic to handle LegacyAPIResponse in OpenAI
1 parent 8fa0c1b commit a012d67

File tree

6 files changed (+379, -9 lines changed)

instrumentation-genai/opentelemetry-instrumentation-openai-v2/CHANGELOG.md

Lines changed: 2 additions & 0 deletions
@@ -6,6 +6,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

 ## Unreleased
+- Fix `AttributeError` when handling `LegacyAPIResponse` (from `with_raw_response`)
+  ([#4002](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/4002))

 ## Version 2.2b0 (2025-11-25)

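For context: the OpenAI Python SDK's `with_raw_response` variants return a `LegacyAPIResponse` wrapper rather than a `ChatCompletion`, and the parsed object is only reachable through its `parse()` method. A minimal sketch of the difference (illustrative only; it assumes a configured client and API key):

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
msgs = [{"role": "user", "content": "Say this is a test"}]

# Plain call: returns a ChatCompletion, so fields like .model are present.
completion = client.chat.completions.create(model="gpt-4o-mini", messages=msgs)
print(completion.model)

# Raw-response call: returns a LegacyAPIResponse that carries HTTP details;
# reading completion fields such as .model on it raises AttributeError,
# which is what the instrumentation previously tripped over.
raw = client.chat.completions.with_raw_response.create(model="gpt-4o-mini", messages=msgs)
print(raw.parse().model)  # parse() yields the underlying ChatCompletion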

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/patch.py

Lines changed: 24 additions & 9 deletions
@@ -66,14 +66,21 @@ def traced_method(wrapped, instance, args, kwargs):
             error_type = None
             try:
                 result = wrapped(*args, **kwargs)
+                if hasattr(result, "parse"):
+                    # result is of type LegacyAPIResponse, call parse to get the actual response
+                    parsed_result = result.parse()
+                else:
+                    parsed_result = result
                 if is_streaming(kwargs):
-                    return StreamWrapper(result, span, logger, capture_content)
+                    return StreamWrapper(
+                        parsed_result, span, logger, capture_content
+                    )

                 if span.is_recording():
                     _set_response_attributes(
-                        span, result, logger, capture_content
+                        span, parsed_result, logger, capture_content
                     )
-                for choice in getattr(result, "choices", []):
+                for choice in getattr(parsed_result, "choices", []):
                     logger.emit(choice_to_event(choice, capture_content))

                 span.end()
@@ -123,14 +130,21 @@ async def traced_method(wrapped, instance, args, kwargs):
             error_type = None
             try:
                 result = await wrapped(*args, **kwargs)
+                if hasattr(result, "parse"):
+                    # result is of type LegacyAPIResponse, calling parse to get the actual response
+                    parsed_result = result.parse()
+                else:
+                    parsed_result = result
                 if is_streaming(kwargs):
-                    return StreamWrapper(result, span, logger, capture_content)
+                    return StreamWrapper(
+                        parsed_result, span, logger, capture_content
+                    )

                 if span.is_recording():
                     _set_response_attributes(
-                        span, result, logger, capture_content
+                        span, parsed_result, logger, capture_content
                     )
-                for choice in getattr(result, "choices", []):
+                for choice in getattr(parsed_result, "choices", []):
                     logger.emit(choice_to_event(choice, capture_content))

                 span.end()
@@ -349,9 +363,10 @@ def _record_metrics(
 def _set_response_attributes(
     span, result, logger: Logger, capture_content: bool
 ):
-    set_span_attribute(
-        span, GenAIAttributes.GEN_AI_RESPONSE_MODEL, result.model
-    )
+    if getattr(result, "model", None):
+        set_span_attribute(
+            span, GenAIAttributes.GEN_AI_RESPONSE_MODEL, result.model
+        )

     if getattr(result, "choices", None):
         finish_reasons = []
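The fix deliberately duck-types on a `parse` attribute instead of importing `LegacyAPIResponse`, so both the plain and the raw-response code paths funnel into the same `parsed_result`, and `_set_response_attributes` additionally guards the `model` attribute. Restated outside the diff as a standalone helper (the name `_unwrap_raw_response` is mine, not part of the patch):

def _unwrap_raw_response(result):
    # LegacyAPIResponse (returned by the SDK's with_raw_response surface)
    # exposes the real ChatCompletion via parse(); plain responses have no
    # parse attribute and are passed through unchanged.
    if hasattr(result, "parse"):
        return result.parse()
    return result

Streaming requests take the same route: the unwrapped object is what gets handed to `StreamWrapper`.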
Lines changed: 135 additions & 0 deletions
@@ -0,0 +1,135 @@
+interactions:
+- request:
+    body: |-
+      {
+        "messages": [
+          {
+            "role": "user",
+            "content": "Say this is a test"
+          }
+        ],
+        "model": "gpt-4o-mini"
+      }
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate
+      authorization:
+      - Bearer test_openai_api_key
+      connection:
+      - keep-alive
+      content-length:
+      - '90'
+      content-type:
+      - application/json
+      host:
+      - api.openai.com
+      user-agent:
+      - OpenAI/Python 1.54.3
+      x-stainless-arch:
+      - arm64
+      x-stainless-async:
+      - 'true'
+      x-stainless-lang:
+      - python
+      x-stainless-os:
+      - MacOS
+      x-stainless-package-version:
+      - 1.54.3
+      x-stainless-raw-response:
+      - 'true'
+      x-stainless-retry-count:
+      - '0'
+      x-stainless-runtime:
+      - CPython
+      x-stainless-runtime-version:
+      - 3.12.6
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: |-
+        {
+          "id": "chatcmpl-ASYMQRl3A3DXL9FWCK9tnGRcKIO7q",
+          "object": "chat.completion",
+          "created": 1731368630,
+          "model": "gpt-4o-mini-2024-07-18",
+          "choices": [
+            {
+              "index": 0,
+              "message": {
+                "role": "assistant",
+                "content": "This is a test.",
+                "refusal": null
+              },
+              "logprobs": null,
+              "finish_reason": "stop"
+            }
+          ],
+          "usage": {
+            "prompt_tokens": 12,
+            "completion_tokens": 5,
+            "total_tokens": 17,
+            "prompt_tokens_details": {
+              "cached_tokens": 0,
+              "audio_tokens": 0
+            },
+            "completion_tokens_details": {
+              "reasoning_tokens": 0,
+              "audio_tokens": 0,
+              "accepted_prediction_tokens": 0,
+              "rejected_prediction_tokens": 0
+            }
+          },
+          "system_fingerprint": "fp_0ba0d124f1"
+        }
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 8e122593ff368bc8-SIN
+      Connection:
+      - keep-alive
+      Content-Type:
+      - application/json
+      Date:
+      - Mon, 11 Nov 2024 23:43:50 GMT
+      Server:
+      - cloudflare
+      Set-Cookie: test_set_cookie
+      Transfer-Encoding:
+      - chunked
+      X-Content-Type-Options:
+      - nosniff
+      access-control-expose-headers:
+      - X-Request-ID
+      alt-svc:
+      - h3=":443"; ma=86400
+      content-length:
+      - '765'
+      openai-organization: test_openai_org_id
+      openai-processing-ms:
+      - '287'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains; preload
+      x-ratelimit-limit-requests:
+      - '10000'
+      x-ratelimit-limit-tokens:
+      - '200000'
+      x-ratelimit-remaining-requests:
+      - '9999'
+      x-ratelimit-remaining-tokens:
+      - '199977'
+      x-ratelimit-reset-requests:
+      - 8.64s
+      x-ratelimit-reset-tokens:
+      - 6ms
+      x-request-id:
+      - req_58cff97afd0e7c0bba910ccf0b044a6f
+    status:
+      code: 200
+      message: OK
+version: 1
Lines changed: 135 additions & 0 deletions
@@ -0,0 +1,135 @@
+interactions:
+- request:
+    body: |-
+      {
+        "messages": [
+          {
+            "role": "user",
+            "content": "Say this is a test"
+          }
+        ],
+        "model": "gpt-4o-mini"
+      }
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate
+      authorization:
+      - Bearer test_openai_api_key
+      connection:
+      - keep-alive
+      content-length:
+      - '90'
+      content-type:
+      - application/json
+      host:
+      - api.openai.com
+      user-agent:
+      - OpenAI/Python 1.54.3
+      x-stainless-arch:
+      - arm64
+      x-stainless-async:
+      - 'false'
+      x-stainless-lang:
+      - python
+      x-stainless-os:
+      - MacOS
+      x-stainless-package-version:
+      - 1.54.3
+      x-stainless-raw-response:
+      - 'true'
+      x-stainless-retry-count:
+      - '0'
+      x-stainless-runtime:
+      - CPython
+      x-stainless-runtime-version:
+      - 3.12.6
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: |-
+        {
+          "id": "chatcmpl-ASYMQRl3A3DXL9FWCK9tnGRcKIO7q",
+          "object": "chat.completion",
+          "created": 1731368630,
+          "model": "gpt-4o-mini-2024-07-18",
+          "choices": [
+            {
+              "index": 0,
+              "message": {
+                "role": "assistant",
+                "content": "This is a test.",
+                "refusal": null
+              },
+              "logprobs": null,
+              "finish_reason": "stop"
+            }
+          ],
+          "usage": {
+            "prompt_tokens": 12,
+            "completion_tokens": 5,
+            "total_tokens": 17,
+            "prompt_tokens_details": {
+              "cached_tokens": 0,
+              "audio_tokens": 0
+            },
+            "completion_tokens_details": {
+              "reasoning_tokens": 0,
+              "audio_tokens": 0,
+              "accepted_prediction_tokens": 0,
+              "rejected_prediction_tokens": 0
+            }
+          },
+          "system_fingerprint": "fp_0ba0d124f1"
+        }
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 8e122593ff368bc8-SIN
+      Connection:
+      - keep-alive
+      Content-Type:
+      - application/json
+      Date:
+      - Mon, 11 Nov 2024 23:43:50 GMT
+      Server:
+      - cloudflare
+      Set-Cookie: test_set_cookie
+      Transfer-Encoding:
+      - chunked
+      X-Content-Type-Options:
+      - nosniff
+      access-control-expose-headers:
+      - X-Request-ID
+      alt-svc:
+      - h3=":443"; ma=86400
+      content-length:
+      - '765'
+      openai-organization: test_openai_org_id
+      openai-processing-ms:
+      - '287'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains; preload
+      x-ratelimit-limit-requests:
+      - '10000'
+      x-ratelimit-limit-tokens:
+      - '200000'
+      x-ratelimit-remaining-requests:
+      - '9999'
+      x-ratelimit-remaining-tokens:
+      - '199977'
+      x-ratelimit-reset-requests:
+      - 8.64s
+      x-ratelimit-reset-tokens:
+      - 6ms
+      x-request-id:
+      - req_58cff97afd0e7c0bba910ccf0b044a6f
+    status:
+      code: 200
+      message: OK
+version: 1
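The scrubbed values in both cassettes (`Bearer test_openai_api_key`, `test_openai_org_id`, `test_set_cookie`) come from the test suite's VCR configuration rather than from OpenAI. A sketch of a pytest-recording `vcr_config` fixture that would produce cassettes scrubbed this way (the project's actual conftest may differ in its exact filter list):

import pytest

@pytest.fixture(scope="module")
def vcr_config():
    # Replace sensitive request/response headers before the cassette is written.
    return {
        "filter_headers": [
            ("authorization", "Bearer test_openai_api_key"),
            ("openai-organization", "test_openai_org_id"),
            ("Set-Cookie", "test_set_cookie"),
        ],
        "decode_compressed_response": True,
    }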

instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py

Lines changed: 43 additions & 0 deletions
@@ -256,6 +256,49 @@ async def test_async_chat_completion_multiple_choices(
     assert_message_in_logs(logs[2], "gen_ai.choice", choice_event_1, spans[0])


+@pytest.mark.vcr()
+@pytest.mark.asyncio()
+async def test_async_chat_completion_with_raw_repsonse(
+    span_exporter, log_exporter, async_openai_client, instrument_with_content
+):
+    llm_model_value = "gpt-4o-mini"
+    messages_value = [{"role": "user", "content": "Say this is a test"}]
+    response = (
+        await async_openai_client.chat.completions.with_raw_response.create(
+            messages=messages_value,
+            model=llm_model_value,
+        )
+    )
+    response = response.parse()
+    spans = span_exporter.get_finished_spans()
+    assert_all_attributes(
+        spans[0],
+        llm_model_value,
+        response.id,
+        response.model,
+        response.usage.prompt_tokens,
+        response.usage.completion_tokens,
+    )
+
+    logs = log_exporter.get_finished_logs()
+    assert len(logs) == 2
+
+    user_message = {"content": messages_value[0]["content"]}
+    assert_message_in_logs(
+        logs[0], "gen_ai.user.message", user_message, spans[0]
+    )
+
+    choice_event = {
+        "index": 0,
+        "finish_reason": "stop",
+        "message": {
+            "role": "assistant",
+            "content": response.choices[0].message.content,
+        },
+    }
+    assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0])
+
+
 @pytest.mark.vcr()
 @pytest.mark.asyncio()
 async def test_async_chat_completion_tool_calls_with_content(
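Outside the test harness, the same async path can be exercised end to end. A sketch (assuming `opentelemetry-sdk` is installed and `OPENAI_API_KEY` is set; in the test above the HTTP exchange is replayed from the recorded cassette instead):

import asyncio

from openai import AsyncOpenAI
from opentelemetry import trace
from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

# Wire spans to stdout and patch the OpenAI client methods.
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)
OpenAIInstrumentor().instrument()

async def main():
    client = AsyncOpenAI()
    raw = await client.chat.completions.with_raw_response.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Say this is a test"}],
    )
    completion = raw.parse()  # LegacyAPIResponse -> ChatCompletion
    print(completion.choices[0].message.content)

asyncio.run(main())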
