diff --git a/instrumentation/elastic-opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/helpers.py b/instrumentation/elastic-opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/helpers.py
index 82173a2..1d75555 100644
--- a/instrumentation/elastic-opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/helpers.py
+++ b/instrumentation/elastic-opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/helpers.py
@@ -21,7 +21,6 @@
 from opentelemetry._events import Event, EventLogger
 from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
     GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT,
-    GEN_AI_OPENAI_REQUEST_SEED,
     GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
     GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
     GEN_AI_OPERATION_NAME,
@@ -30,6 +29,7 @@
     GEN_AI_REQUEST_MAX_TOKENS,
     GEN_AI_REQUEST_MODEL,
     GEN_AI_REQUEST_PRESENCE_PENALTY,
+    GEN_AI_REQUEST_SEED,
     GEN_AI_REQUEST_STOP_SEQUENCES,
     GEN_AI_REQUEST_TEMPERATURE,
     GEN_AI_REQUEST_TOP_P,
@@ -159,7 +159,7 @@ def _is_set(value):
             stop_sequences = [stop_sequences]
         span_attributes[GEN_AI_REQUEST_STOP_SEQUENCES] = stop_sequences
     if _is_set(seed := kwargs.get("seed")):
-        span_attributes[GEN_AI_OPENAI_REQUEST_SEED] = seed
+        span_attributes[GEN_AI_REQUEST_SEED] = seed
     if _is_set(service_tier := kwargs.get("service_tier")):
         span_attributes[GEN_AI_OPENAI_REQUEST_SERVICE_TIER] = service_tier
     if _is_set(response_format := kwargs.get("response_format")):
diff --git a/instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_beta_chat_completions.py b/instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_beta_chat_completions.py
index 91b775a..a746cad 100644
--- a/instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_beta_chat_completions.py
+++ b/instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_beta_chat_completions.py
@@ -29,7 +29,6 @@
 from opentelemetry.instrumentation.openai import OpenAIInstrumentor
 from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
     GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT,
-    GEN_AI_OPENAI_REQUEST_SEED,
     GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
     GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
     GEN_AI_OPERATION_NAME,
@@ -38,6 +37,7 @@
     GEN_AI_REQUEST_MAX_TOKENS,
     GEN_AI_REQUEST_MODEL,
     GEN_AI_REQUEST_PRESENCE_PENALTY,
+    GEN_AI_REQUEST_SEED,
     GEN_AI_REQUEST_STOP_SEQUENCES,
     GEN_AI_REQUEST_TEMPERATURE,
     GEN_AI_REQUEST_TOP_P,
@@ -246,7 +246,7 @@ def test_chat_all_the_client_options(default_openai_env, trace_exporter, metrics
     address, port = address_and_port(client)

     expected_attrs = {
-        GEN_AI_OPENAI_REQUEST_SEED: 100,
+        GEN_AI_REQUEST_SEED: 100,
         GEN_AI_OPENAI_REQUEST_SERVICE_TIER: "default",
         GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: "text",
         GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
diff --git a/instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_chat_completions.py b/instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_chat_completions.py
index 6a68e19..b3dc314 100644
--- a/instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_chat_completions.py
+++ b/instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_chat_completions.py
@@ -28,7 +28,6 @@
 from opentelemetry.instrumentation.openai import OpenAIInstrumentor
 from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
     GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT,
-    GEN_AI_OPENAI_REQUEST_SEED,
     GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
     GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
     GEN_AI_OPERATION_NAME,
@@ -37,6 +36,7 @@
     GEN_AI_REQUEST_MAX_TOKENS,
     GEN_AI_REQUEST_MODEL,
     GEN_AI_REQUEST_PRESENCE_PENALTY,
+    GEN_AI_REQUEST_SEED,
     GEN_AI_REQUEST_STOP_SEQUENCES,
     GEN_AI_REQUEST_TEMPERATURE,
     GEN_AI_REQUEST_TOP_P,
@@ -330,7 +330,7 @@ def test_chat_all_the_client_options(default_openai_env, trace_exporter, metrics
     address, port = address_and_port(client)

     expected_attrs = {
-        GEN_AI_OPENAI_REQUEST_SEED: 100,
+        GEN_AI_REQUEST_SEED: 100,
         GEN_AI_OPENAI_REQUEST_SERVICE_TIER: "default",
         GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: "text",
         GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
@@ -1203,7 +1203,7 @@ def test_chat_stream_all_the_client_options(default_openai_env, trace_exporter,
     address, port = address_and_port(client)

     expected_attrs = {
-        GEN_AI_OPENAI_REQUEST_SEED: 100,
+        GEN_AI_REQUEST_SEED: 100,
         GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: "text",
         GEN_AI_OPENAI_REQUEST_SERVICE_TIER: "default",
         GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",