
Commit fc46d03

Merge branch 'main' into add-type-hints-to-dbapi
2 parents 0529154 + ecf5529 commit fc46d03

16 files changed (+81, -50 lines)

instrumentation-genai/opentelemetry-instrumentation-openai-v2/CHANGELOG.md

Lines changed: 2 additions & 0 deletions
@@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ## Unreleased

+- Coerce openai response_format to semconv format
+  ([#3073](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3073))
 - Add example to `opentelemetry-instrumentation-openai-v2`
   ([#3006](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3006))
 - Support for `AsyncOpenAI/AsyncCompletions` ([#2984](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2984))

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/utils.py

Lines changed: 15 additions & 4 deletions
@@ -13,7 +13,7 @@
 # limitations under the License.

 from os import environ
-from typing import Optional, Union
+from typing import Mapping, Optional, Union
 from urllib.parse import urlparse

 from httpx import URL
@@ -202,12 +202,23 @@ def get_llm_request_attributes(
         GenAIAttributes.GEN_AI_REQUEST_FREQUENCY_PENALTY: kwargs.get(
             "frequency_penalty"
         ),
-        GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: kwargs.get(
-            "response_format"
-        ),
         GenAIAttributes.GEN_AI_OPENAI_REQUEST_SEED: kwargs.get("seed"),
     }

+    if (response_format := kwargs.get("response_format")) is not None:
+        # response_format may be string or object with a string in the `type` key
+        if isinstance(response_format, Mapping):
+            if (
+                response_format_type := response_format.get("type")
+            ) is not None:
+                attributes[
+                    GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
+                ] = response_format_type
+        else:
+            attributes[
+                GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
+            ] = response_format
+
     set_server_address_and_port(client_instance, attributes)
     service_tier = kwargs.get("service_tier")
     attributes[GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER] = (
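
For context, a minimal standalone sketch of the coercion behavior added in this hunk. The helper name and the literal attribute key are illustrative assumptions for this sketch only; the instrumentation itself does this inside get_llm_request_attributes using GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT.

    # Hedged sketch, not the library code itself.
    from typing import Any, Mapping

    RESPONSE_FORMAT_ATTR = "gen_ai.openai.request.response_format"  # illustrative key

    def coerce_response_format(attributes: dict, kwargs: Mapping[str, Any]) -> None:
        response_format = kwargs.get("response_format")
        if response_format is None:
            return
        if isinstance(response_format, Mapping):
            # Dict form, e.g. {"type": "json_object"}: record only the type string.
            response_format_type = response_format.get("type")
            if response_format_type is not None:
                attributes[RESPONSE_FORMAT_ATTR] = response_format_type
        else:
            # Plain string (or other) form is recorded as-is.
            attributes[RESPONSE_FORMAT_ATTR] = response_format

    # {"type": "text"} is recorded as "text"; a bare string passes through unchanged.
    attrs = {}
    coerce_response_format(attrs, {"response_format": {"type": "text"}})
    assert attrs[RESPONSE_FORMAT_ATTR] == "text"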

instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_chat_completion_extra_params.yaml

Lines changed: 18 additions & 15 deletions
@@ -10,6 +10,9 @@ interactions:
         ],
         "model": "gpt-4o-mini",
         "max_tokens": 50,
+        "response_format": {
+          "type": "text"
+        },
         "seed": 42,
         "stream": false,
         "temperature": 0.5,
@@ -25,7 +28,7 @@ interactions:
       connection:
       - keep-alive
       content-length:
-      - '183'
+      - '220'
       content-type:
       - application/json
       host:
@@ -45,16 +48,16 @@ interactions:
       x-stainless-runtime:
       - CPython
       x-stainless-runtime-version:
-      - 3.12.5
+      - 3.12.7
     method: POST
     uri: https://api.openai.com/v1/chat/completions
   response:
     body:
       string: |-
         {
-          "id": "chatcmpl-ASv9WMTAMZY4O1EImv3csZa6Ch7KI",
+          "id": "chatcmpl-AbMH3rR6OBMN9hG5w0TRrezuiHLMr",
           "object": "chat.completion",
-          "created": 1731456242,
+          "created": 1733467121,
           "model": "gpt-4o-mini-2024-07-18",
           "choices": [
             {
@@ -84,19 +87,19 @@ interactions:
             }
           },
           "service_tier": "default",
-          "system_fingerprint": "fp_0ba0d124f1"
+          "system_fingerprint": "fp_bba3c8e70b"
         }
     headers:
       CF-Cache-Status:
       - DYNAMIC
       CF-RAY:
-      - 8e1a8088f867e167-MRS
+      - 8eda4640ead3e535-KUL
       Connection:
       - keep-alive
       Content-Type:
       - application/json
       Date:
-      - Wed, 13 Nov 2024 00:04:02 GMT
+      - Fri, 06 Dec 2024 06:38:42 GMT
       Server:
       - cloudflare
       Set-Cookie: test_set_cookie
@@ -112,25 +115,25 @@ interactions:
       - '825'
       openai-organization: test_openai_org_id
       openai-processing-ms:
-      - '488'
+      - '835'
       openai-version:
       - '2020-10-01'
       strict-transport-security:
       - max-age=31536000; includeSubDomains; preload
       x-ratelimit-limit-requests:
-      - '30000'
+      - '10000'
       x-ratelimit-limit-tokens:
-      - '150000000'
+      - '200000'
       x-ratelimit-remaining-requests:
-      - '29999'
+      - '9999'
       x-ratelimit-remaining-tokens:
-      - '149999943'
+      - '199943'
       x-ratelimit-reset-requests:
-      - 2ms
+      - 8.64s
       x-ratelimit-reset-tokens:
-      - 0s
+      - 16ms
       x-request-id:
-      - req_6df08d6267415e8f5db3628a6757edad
+      - req_fea877c0a861ff92a6a5217247681f24
     status:
       code: 200
       message: OK

instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_chat_completion_extra_params.yaml

Lines changed: 17 additions & 16 deletions
@@ -10,6 +10,9 @@ interactions:
         ],
         "model": "gpt-4o-mini",
         "max_tokens": 50,
+        "response_format": {
+          "type": "text"
+        },
         "seed": 42,
         "stream": false,
         "temperature": 0.5,
@@ -25,13 +28,13 @@ interactions:
       connection:
       - keep-alive
       content-length:
-      - '183'
+      - '220'
       content-type:
       - application/json
       host:
       - api.openai.com
       user-agent:
-      - OpenAI/Python 1.54.3
+      - OpenAI/Python 1.26.0
       x-stainless-arch:
       - arm64
       x-stainless-async:
@@ -41,22 +44,20 @@ interactions:
       x-stainless-os:
       - MacOS
       x-stainless-package-version:
-      - 1.54.3
-      x-stainless-retry-count:
-      - '0'
+      - 1.26.0
       x-stainless-runtime:
       - CPython
       x-stainless-runtime-version:
-      - 3.12.6
+      - 3.12.7
     method: POST
     uri: https://api.openai.com/v1/chat/completions
   response:
     body:
       string: |-
         {
-          "id": "chatcmpl-ASYMT7913Sp58qhZqQgY7g7Ia2J4M",
+          "id": "chatcmpl-AbMH70fQA9lMPIClvBPyBSjqJBm9F",
           "object": "chat.completion",
-          "created": 1731368633,
+          "created": 1733467125,
           "model": "gpt-4o-mini-2024-07-18",
           "choices": [
             {
@@ -86,19 +87,17 @@ interactions:
             }
           },
           "service_tier": "default",
-          "system_fingerprint": "fp_0ba0d124f1"
+          "system_fingerprint": "fp_0705bf87c0"
         }
     headers:
-      CF-Cache-Status:
-      - DYNAMIC
       CF-RAY:
-      - 8e1225a3f8e9ce65-SIN
+      - 8eda465e8fe9e58c-KUL
       Connection:
       - keep-alive
       Content-Type:
       - application/json
       Date:
-      - Mon, 11 Nov 2024 23:43:53 GMT
+      - Fri, 06 Dec 2024 06:38:46 GMT
       Server:
       - cloudflare
       Set-Cookie: test_set_cookie
@@ -110,11 +109,13 @@ interactions:
       - X-Request-ID
       alt-svc:
      - h3=":443"; ma=86400
+      cf-cache-status:
+      - DYNAMIC
       content-length:
       - '825'
       openai-organization: test_openai_org_id
       openai-processing-ms:
-      - '431'
+      - '558'
       openai-version:
       - '2020-10-01'
       strict-transport-security:
@@ -128,11 +129,11 @@ interactions:
       x-ratelimit-remaining-tokens:
       - '199943'
       x-ratelimit-reset-requests:
-      - 14.746s
+      - 12.967s
       x-ratelimit-reset-tokens:
       - 16ms
       x-request-id:
-      - req_81e29a8992ea8001c0240bd990acf0ab
+      - req_22ff608d47a299f0780f52360631eabb
     status:
       code: 200
       message: OK

instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py

Lines changed: 7 additions & 0 deletions
@@ -158,6 +158,7 @@ async def test_async_chat_completion_extra_params(
         max_tokens=50,
         stream=False,
         extra_body={"service_tier": "default"},
+        response_format={"type": "text"},
     )

     spans = span_exporter.get_finished_spans()
@@ -173,6 +174,12 @@ async def test_async_chat_completion_extra_params(
         spans[0].attributes[GenAIAttributes.GEN_AI_OPENAI_REQUEST_SERVICE_TIER]
         == "default"
     )
+    assert (
+        spans[0].attributes[
+            GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
+        ]
+        == "text"
+    )


 @pytest.mark.vcr()

instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py

Lines changed: 7 additions & 0 deletions
@@ -151,6 +151,7 @@ def test_chat_completion_extra_params(
         max_tokens=50,
         stream=False,
         extra_body={"service_tier": "default"},
+        response_format={"type": "text"},
     )

     spans = span_exporter.get_finished_spans()
@@ -166,6 +167,12 @@ def test_chat_completion_extra_params(
         spans[0].attributes[GenAIAttributes.GEN_AI_OPENAI_REQUEST_SERVICE_TIER]
         == "default"
     )
+    assert (
+        spans[0].attributes[
+            GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
+        ]
+        == "text"
+    )


 @pytest.mark.vcr()

instrumentation/opentelemetry-instrumentation-aiohttp-client/test-requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ requests==2.32.3
 tomli==2.0.1
 typing_extensions==4.12.2
 urllib3==2.2.2
-Werkzeug==3.0.3
+Werkzeug==3.0.6
 wrapt==1.16.0
 yarl==1.9.4
 zipp==3.19.2

instrumentation/opentelemetry-instrumentation-aiokafka/src/opentelemetry/instrumentation/aiokafka/__init__.py

Lines changed: 3 additions & 2 deletions
@@ -18,7 +18,7 @@
 Usage
 -----

-..code:: python
+.. code:: python

     from opentelemetry.instrumentation.aiokafka import AIOKafkaInstrumentor
     from aiokafka import AIOKafkaProducer, AIOKafkaConsumer
@@ -45,7 +45,8 @@ def async_produce_hook(span: Span, args, kwargs)
 def async_consume_hook(span: Span, record: kafka.record.ABCRecord, args, kwargs)
 for example:

-.. code: python
+.. code:: python
+
     from opentelemetry.instrumentation.kafka import AIOKafkaInstrumentor
     from aiokafka import AIOKafkaProducer, AIOKafkaConsumer
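
As a companion to the hook signatures quoted in this docstring, a hedged sketch of registering them. The hook parameter shapes follow the docstring; the exact keyword-argument names accepted by instrument() and the attribute names set inside the hooks are assumptions for illustration.

    # Hedged sketch: wiring the produce/consume hooks named in the docstring above.
    from opentelemetry.instrumentation.aiokafka import AIOKafkaInstrumentor
    from opentelemetry.trace import Span

    def async_produce_hook(span: Span, args, kwargs):
        # Runs when a message is produced; enrich the span if it is recording.
        if span and span.is_recording():
            span.set_attribute("app.example.produce_hook", True)

    def async_consume_hook(span: Span, record, args, kwargs):
        # Runs when a message record is consumed.
        if span and span.is_recording():
            span.set_attribute("app.example.consume_hook", True)

    AIOKafkaInstrumentor().instrument(
        async_produce_hook=async_produce_hook,
        async_consume_hook=async_consume_hook,
    )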

instrumentation/opentelemetry-instrumentation-botocore/test-requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -29,7 +29,7 @@ six==1.16.0
 tomli==2.0.1
 typing_extensions==4.12.2
 urllib3==1.26.19
-Werkzeug==3.0.3
+Werkzeug==3.0.6
 wrapt==1.16.0
 xmltodict==0.13.0
 zipp==3.19.2

instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py

Lines changed: 1 addition & 3 deletions
@@ -18,7 +18,7 @@
 Usage
 -----

-.. code-block:: python
+.. code:: python

     from opentelemetry.instrumentation.confluent_kafka import ConfluentKafkaInstrumentor
     from confluent_kafka import Producer, Consumer
@@ -54,7 +54,6 @@ def basic_consume_loop(consumer, topics):
             consumer.close()

     basic_consume_loop(consumer, "my-topic")
----

 The _instrument method accepts the following keyword args:
 tracer_provider (TracerProvider) - an optional tracer provider
@@ -95,7 +94,6 @@ def instrument_consumer(consumer: Consumer, tracer_provider=None)
     p.produce('my-topic',b'raw_bytes')
     msg = c.poll()

-___
 """

 from typing import Collection
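
For reference, a hedged sketch of the manual instrumentation flow this docstring describes, using the instrument_producer/instrument_consumer signatures shown above. The broker address, topic name, and consumer group are placeholder assumptions, and the exact return behaviour of the instrument_* helpers should be checked against the package docs.

    # Hedged sketch based on the docstring above; configuration values are placeholders.
    from confluent_kafka import Producer, Consumer
    from opentelemetry.instrumentation.confluent_kafka import ConfluentKafkaInstrumentor

    producer = Producer({"bootstrap.servers": "localhost:9092"})
    consumer = Consumer(
        {
            "bootstrap.servers": "localhost:9092",
            "group.id": "example-group",
            "auto.offset.reset": "earliest",
        }
    )

    # Instrument the existing clients directly, as in the docstring's example.
    instrumentation = ConfluentKafkaInstrumentor()
    producer = instrumentation.instrument_producer(producer)
    consumer = instrumentation.instrument_consumer(consumer)

    producer.produce("my-topic", b"raw_bytes")
    consumer.subscribe(["my-topic"])
    msg = consumer.poll(timeout=1.0)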
