
Commit 6134d5a

opentelemetry-instrumentation-openai-v2: coerce openai response_format to semconv format (open-telemetry#3073)
* opentelemetry-instrumentation-openai-v2: coerce openai response_format to semconv format
  Signed-off-by: Adrian Cole <[email protected]>
* changelog
  Signed-off-by: Adrian Cole <[email protected]>
---------
Signed-off-by: Adrian Cole <[email protected]>
1 parent 6c92f38 commit 6134d5a

6 files changed: +66 −35 lines

instrumentation-genai/opentelemetry-instrumentation-openai-v2/CHANGELOG.md

Lines changed: 2 additions & 0 deletions
@@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## Unreleased
 
+- Coerce openai response_format to semconv format
+  ([#3073](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3073))
 - Add example to `opentelemetry-instrumentation-openai-v2`
   ([#3006](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3006))
 - Support for `AsyncOpenAI/AsyncCompletions` ([#2984](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2984))

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/utils.py

Lines changed: 15 additions & 4 deletions
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from os import environ
-from typing import Optional, Union
+from typing import Mapping, Optional, Union
 from urllib.parse import urlparse
 
 from httpx import URL
@@ -202,12 +202,23 @@ def get_llm_request_attributes(
         GenAIAttributes.GEN_AI_REQUEST_FREQUENCY_PENALTY: kwargs.get(
             "frequency_penalty"
         ),
-        GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: kwargs.get(
-            "response_format"
-        ),
         GenAIAttributes.GEN_AI_OPENAI_REQUEST_SEED: kwargs.get("seed"),
     }
 
+    if (response_format := kwargs.get("response_format")) is not None:
+        # response_format may be string or object with a string in the `type` key
+        if isinstance(response_format, Mapping):
+            if (
+                response_format_type := response_format.get("type")
+            ) is not None:
+                attributes[
+                    GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
+                ] = response_format_type
+        else:
+            attributes[
+                GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
+            ] = response_format
+
     set_server_address_and_port(client_instance, attributes)
     service_tier = kwargs.get("service_tier")
     attributes[GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER] = (
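In effect, the request attribute now always carries the semconv string form of `response_format` rather than the raw request value. A minimal standalone sketch of the coercion rules (the `coerce_response_format` helper name is hypothetical and used only for illustration; the instrumentation applies the same logic inline in `get_llm_request_attributes`):

```python
from typing import Mapping, Optional

# Hypothetical helper mirroring the logic added to get_llm_request_attributes();
# it is not part of the instrumentation's API and exists only to illustrate how
# the request's response_format is coerced before it is recorded on the span.
def coerce_response_format(response_format: object) -> Optional[object]:
    if response_format is None:
        return None  # attribute is not recorded at all
    if isinstance(response_format, Mapping):
        # e.g. {"type": "text"} or {"type": "json_schema", "json_schema": {...}}
        # -> only the `type` string is kept; a mapping without `type` records nothing
        return response_format.get("type")
    # already a plain value (e.g. the string "text"); recorded as-is
    return response_format


assert coerce_response_format({"type": "text"}) == "text"
assert coerce_response_format({"type": "json_object"}) == "json_object"
assert coerce_response_format("text") == "text"
assert coerce_response_format(None) is None
```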

instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_chat_completion_extra_params.yaml

Lines changed: 18 additions & 15 deletions
@@ -10,6 +10,9 @@ interactions:
         ],
         "model": "gpt-4o-mini",
         "max_tokens": 50,
+        "response_format": {
+          "type": "text"
+        },
         "seed": 42,
         "stream": false,
         "temperature": 0.5,
@@ -25,7 +28,7 @@ interactions:
       connection:
       - keep-alive
       content-length:
-      - '183'
+      - '220'
       content-type:
       - application/json
       host:
@@ -45,16 +48,16 @@ interactions:
       x-stainless-runtime:
       - CPython
      x-stainless-runtime-version:
-      - 3.12.5
+      - 3.12.7
     method: POST
     uri: https://api.openai.com/v1/chat/completions
   response:
     body:
       string: |-
         {
-          "id": "chatcmpl-ASv9WMTAMZY4O1EImv3csZa6Ch7KI",
+          "id": "chatcmpl-AbMH3rR6OBMN9hG5w0TRrezuiHLMr",
           "object": "chat.completion",
-          "created": 1731456242,
+          "created": 1733467121,
           "model": "gpt-4o-mini-2024-07-18",
           "choices": [
             {
@@ -84,19 +87,19 @@ interactions:
             }
           },
           "service_tier": "default",
-          "system_fingerprint": "fp_0ba0d124f1"
+          "system_fingerprint": "fp_bba3c8e70b"
         }
     headers:
       CF-Cache-Status:
       - DYNAMIC
       CF-RAY:
-      - 8e1a8088f867e167-MRS
+      - 8eda4640ead3e535-KUL
       Connection:
       - keep-alive
       Content-Type:
       - application/json
       Date:
-      - Wed, 13 Nov 2024 00:04:02 GMT
+      - Fri, 06 Dec 2024 06:38:42 GMT
       Server:
       - cloudflare
       Set-Cookie: test_set_cookie
@@ -112,25 +115,25 @@ interactions:
       - '825'
       openai-organization: test_openai_org_id
       openai-processing-ms:
-      - '488'
+      - '835'
       openai-version:
       - '2020-10-01'
       strict-transport-security:
       - max-age=31536000; includeSubDomains; preload
       x-ratelimit-limit-requests:
-      - '30000'
+      - '10000'
       x-ratelimit-limit-tokens:
-      - '150000000'
+      - '200000'
       x-ratelimit-remaining-requests:
-      - '29999'
+      - '9999'
       x-ratelimit-remaining-tokens:
-      - '149999943'
+      - '199943'
       x-ratelimit-reset-requests:
-      - 2ms
+      - 8.64s
       x-ratelimit-reset-tokens:
-      - 0s
+      - 16ms
       x-request-id:
-      - req_6df08d6267415e8f5db3628a6757edad
+      - req_fea877c0a861ff92a6a5217247681f24
     status:
       code: 200
       message: OK

instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_chat_completion_extra_params.yaml

Lines changed: 17 additions & 16 deletions
@@ -10,6 +10,9 @@ interactions:
         ],
         "model": "gpt-4o-mini",
         "max_tokens": 50,
+        "response_format": {
+          "type": "text"
+        },
         "seed": 42,
         "stream": false,
         "temperature": 0.5,
@@ -25,13 +28,13 @@ interactions:
       connection:
       - keep-alive
       content-length:
-      - '183'
+      - '220'
       content-type:
       - application/json
       host:
       - api.openai.com
       user-agent:
-      - OpenAI/Python 1.54.3
+      - OpenAI/Python 1.26.0
       x-stainless-arch:
       - arm64
       x-stainless-async:
@@ -41,22 +44,20 @@ interactions:
       x-stainless-os:
       - MacOS
       x-stainless-package-version:
-      - 1.54.3
-      x-stainless-retry-count:
-      - '0'
+      - 1.26.0
       x-stainless-runtime:
       - CPython
       x-stainless-runtime-version:
-      - 3.12.6
+      - 3.12.7
     method: POST
     uri: https://api.openai.com/v1/chat/completions
   response:
     body:
       string: |-
         {
-          "id": "chatcmpl-ASYMT7913Sp58qhZqQgY7g7Ia2J4M",
+          "id": "chatcmpl-AbMH70fQA9lMPIClvBPyBSjqJBm9F",
           "object": "chat.completion",
-          "created": 1731368633,
+          "created": 1733467125,
           "model": "gpt-4o-mini-2024-07-18",
           "choices": [
             {
@@ -86,19 +87,17 @@ interactions:
             }
           },
           "service_tier": "default",
-          "system_fingerprint": "fp_0ba0d124f1"
+          "system_fingerprint": "fp_0705bf87c0"
         }
     headers:
-      CF-Cache-Status:
-      - DYNAMIC
       CF-RAY:
-      - 8e1225a3f8e9ce65-SIN
+      - 8eda465e8fe9e58c-KUL
       Connection:
       - keep-alive
       Content-Type:
       - application/json
       Date:
-      - Mon, 11 Nov 2024 23:43:53 GMT
+      - Fri, 06 Dec 2024 06:38:46 GMT
       Server:
       - cloudflare
       Set-Cookie: test_set_cookie
@@ -110,11 +109,13 @@ interactions:
       - X-Request-ID
       alt-svc:
       - h3=":443"; ma=86400
+      cf-cache-status:
+      - DYNAMIC
       content-length:
       - '825'
       openai-organization: test_openai_org_id
       openai-processing-ms:
-      - '431'
+      - '558'
       openai-version:
       - '2020-10-01'
       strict-transport-security:
@@ -128,11 +129,11 @@ interactions:
       x-ratelimit-remaining-tokens:
       - '199943'
       x-ratelimit-reset-requests:
-      - 14.746s
+      - 12.967s
       x-ratelimit-reset-tokens:
       - 16ms
       x-request-id:
-      - req_81e29a8992ea8001c0240bd990acf0ab
+      - req_22ff608d47a299f0780f52360631eabb
     status:
       code: 200
       message: OK

instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py

Lines changed: 7 additions & 0 deletions
@@ -158,6 +158,7 @@ async def test_async_chat_completion_extra_params(
         max_tokens=50,
         stream=False,
         extra_body={"service_tier": "default"},
+        response_format={"type": "text"},
     )
 
     spans = span_exporter.get_finished_spans()
@@ -173,6 +174,12 @@ async def test_async_chat_completion_extra_params(
         spans[0].attributes[GenAIAttributes.GEN_AI_OPENAI_REQUEST_SERVICE_TIER]
         == "default"
     )
+    assert (
+        spans[0].attributes[
+            GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
+        ]
+        == "text"
+    )
 
 
 @pytest.mark.vcr()

instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py

Lines changed: 7 additions & 0 deletions
@@ -151,6 +151,7 @@ def test_chat_completion_extra_params(
         max_tokens=50,
         stream=False,
         extra_body={"service_tier": "default"},
+        response_format={"type": "text"},
     )
 
     spans = span_exporter.get_finished_spans()
@@ -166,6 +167,12 @@ def test_chat_completion_extra_params(
         spans[0].attributes[GenAIAttributes.GEN_AI_OPENAI_REQUEST_SERVICE_TIER]
         == "default"
    )
+    assert (
+        spans[0].attributes[
+            GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
+        ]
+        == "text"
+    )
 
 
 @pytest.mark.vcr()
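For orientation, a hedged end-to-end sketch of the behaviour these tests assert, assuming the instrumentation is installed, a tracer provider and span exporter are already configured, and `OPENAI_API_KEY` is set; the setup shown here is illustrative and not part of this diff:

```python
from openai import OpenAI
from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
from opentelemetry.semconv._incubating.attributes import (
    gen_ai_attributes as GenAIAttributes,
)

OpenAIInstrumentor().instrument()  # assumes a tracer provider is already configured

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Say this is a test"}],
    max_tokens=50,
    response_format={"type": "text"},  # dict form accepted by the OpenAI SDK
)

# With this change the emitted span carries the coerced string, not the dict:
#   span.attributes[GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT] == "text"
```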
