Skip to content

Commit 6bad350

Browse files
committed
openai: Use "gen_ai.output.type" instead of deprecated "gen_ai.openai.request.response_format" attribute.
1 parent 6030284 commit 6bad350

File tree

3 files changed: +11 −11 lines changed

instrumentation/elastic-opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/helpers.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -20,10 +20,10 @@
2020

2121
from opentelemetry._events import Event, EventLogger
2222
from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
23-
GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT,
2423
GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
2524
GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
2625
GEN_AI_OPERATION_NAME,
26+
GEN_AI_OUTPUT_TYPE,
2727
GEN_AI_REQUEST_CHOICE_COUNT,
2828
GEN_AI_REQUEST_FREQUENCY_PENALTY,
2929
GEN_AI_REQUEST_MAX_TOKENS,
@@ -166,13 +166,13 @@ def _is_set(value):
166166
# response_format may be string or object with a string in the `type` key
167167
if isinstance(response_format, Mapping):
168168
if _is_set(response_format_type := response_format.get("type")):
169-
span_attributes[GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT] = response_format_type
169+
span_attributes[GEN_AI_OUTPUT_TYPE] = response_format_type
170170
elif isinstance(response_format, str):
171-
span_attributes[GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT] = response_format
171+
span_attributes[GEN_AI_OUTPUT_TYPE] = response_format
172172
else:
173173
# Assume structured output lazily parsed to a schema via type_to_response_format_param or similar.
174174
# e.g. pydantic._internal._model_construction.ModelMetaclass
175-
span_attributes[GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT] = "json_schema"
175+
span_attributes[GEN_AI_OUTPUT_TYPE] = "json_schema"
176176

177177
return span_attributes
178178

instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_beta_chat_completions.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -28,10 +28,10 @@
2828
from opentelemetry._logs import LogRecord
2929
from opentelemetry.instrumentation.openai import OpenAIInstrumentor
3030
from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
31-
GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT,
3231
GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
3332
GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
3433
GEN_AI_OPERATION_NAME,
34+
GEN_AI_OUTPUT_TYPE,
3535
GEN_AI_REQUEST_CHOICE_COUNT,
3636
GEN_AI_REQUEST_FREQUENCY_PENALTY,
3737
GEN_AI_REQUEST_MAX_TOKENS,
@@ -248,7 +248,7 @@ def test_chat_all_the_client_options(default_openai_env, trace_exporter, metrics
248248
expected_attrs = {
249249
GEN_AI_REQUEST_SEED: 100,
250250
GEN_AI_OPENAI_REQUEST_SERVICE_TIER: "default",
251-
GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: "text",
251+
GEN_AI_OUTPUT_TYPE: "text",
252252
GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
253253
GEN_AI_OPERATION_NAME: "chat",
254254
GEN_AI_REQUEST_FREQUENCY_PENALTY: 0,
@@ -1545,7 +1545,7 @@ def test_parse_response_format_json_object_with_capture_message_content(
15451545
address, port = address_and_port(client)
15461546
assert dict(span.attributes) == {
15471547
GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
1548-
GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: "json_object",
1548+
GEN_AI_OUTPUT_TYPE: "json_object",
15491549
GEN_AI_OPERATION_NAME: "chat",
15501550
GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
15511551
GEN_AI_SYSTEM: "openai",
@@ -1620,7 +1620,7 @@ def test_parse_response_format_structured_output_with_capture_message_content(
16201620
address, port = address_and_port(client)
16211621
assert dict(span.attributes) == {
16221622
GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
1623-
GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: "json_schema",
1623+
GEN_AI_OUTPUT_TYPE: "json_schema",
16241624
GEN_AI_OPERATION_NAME: "chat",
16251625
GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
16261626
GEN_AI_SYSTEM: "openai",

instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_chat_completions.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -27,10 +27,10 @@
2727
from opentelemetry._logs import LogRecord
2828
from opentelemetry.instrumentation.openai import OpenAIInstrumentor
2929
from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
30-
GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT,
3130
GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
3231
GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
3332
GEN_AI_OPERATION_NAME,
33+
GEN_AI_OUTPUT_TYPE,
3434
GEN_AI_REQUEST_CHOICE_COUNT,
3535
GEN_AI_REQUEST_FREQUENCY_PENALTY,
3636
GEN_AI_REQUEST_MAX_TOKENS,
@@ -332,7 +332,7 @@ def test_chat_all_the_client_options(default_openai_env, trace_exporter, metrics
332332
expected_attrs = {
333333
GEN_AI_REQUEST_SEED: 100,
334334
GEN_AI_OPENAI_REQUEST_SERVICE_TIER: "default",
335-
GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: "text",
335+
GEN_AI_OUTPUT_TYPE: "text",
336336
GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
337337
GEN_AI_OPERATION_NAME: "chat",
338338
GEN_AI_REQUEST_FREQUENCY_PENALTY: 0,
@@ -1204,7 +1204,7 @@ def test_chat_stream_all_the_client_options(default_openai_env, trace_exporter,
12041204
address, port = address_and_port(client)
12051205
expected_attrs = {
12061206
GEN_AI_REQUEST_SEED: 100,
1207-
GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: "text",
1207+
GEN_AI_OUTPUT_TYPE: "text",
12081208
GEN_AI_OPENAI_REQUEST_SERVICE_TIER: "default",
12091209
GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
12101210
GEN_AI_OPERATION_NAME: "chat",

0 commit comments

Comments (0)