Skip to content

Commit 1720368

Browse files
authored
elastic-opentelemetry-instrumentation-openai: match proposed upstream semconv (elastic#36)
Update attributes to match open-telemetry/semantic-conventions#1603
1 parent ba69a8d commit 1720368

File tree

3 files changed

+13
-12
lines changed

3 files changed

+13
-12
lines changed

instrumentation/elastic-opentelemetry-instrumentation-openai/README.md

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2,13 +2,14 @@
22

33
An OpenTelemetry instrumentation for the `openai` client library.
44

5-
This instrumentation currently only supports instrumenting the Chat completions APIs.
5+
This instrumentation currently supports instrumenting the chat completions and the embeddings APIs.
66

77
We currently support the following features:
88
- `sync` and `async` chat completions
9-
- Streaming support
10-
- Functions calling with tools
9+
- Streaming support for chat completions
10+
Function calling with tools for chat completions
1111
- Client side metrics
12+
- Embeddings API calls
1213
- Following 1.28.0 Gen AI Semantic Conventions
1314

1415
## Installation
@@ -60,7 +61,7 @@ log events instead of span events.
6061
### Elastic specific semantic conventions
6162

6263
- New `embeddings` value for `gen_ai.operation.name`
63-
- New `gen_ai.request.encoding_format` attribute with openai specific values `[float, base64]`
64+
- New `gen_ai.request.encoding_formats` attribute with openai specific values `[[float], [base64]]`
6465

6566
## Development
6667

instrumentation/elastic-opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/helpers.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -48,8 +48,8 @@
4848
EVENT_GEN_AI_SYSTEM_MESSAGE = "gen_ai.system.message"
4949
EVENT_GEN_AI_TOOL_MESSAGE = "gen_ai.tool.message"
5050

51-
# elastic specific attributes
52-
GEN_AI_REQUEST_ENCODING_FORMAT = "gen_ai.request.encoding_format"
51+
# not yet released attributes
52+
GEN_AI_REQUEST_ENCODING_FORMATS = "gen_ai.request.encoding_formats"
5353

5454
# As this is only used for a type annotation, only import from openai module
5555
# when running type checker like pyright since we otherwise don't want to import
@@ -204,7 +204,7 @@ def _get_embeddings_span_attributes_from_wrapper(instance, kwargs) -> Attributes
204204
span_attributes.update(_attributes_from_client(client))
205205

206206
if (encoding_format := kwargs.get("encoding_format")) is not None:
207-
span_attributes[GEN_AI_REQUEST_ENCODING_FORMAT] = encoding_format
207+
span_attributes[GEN_AI_REQUEST_ENCODING_FORMATS] = [encoding_format]
208208

209209
return span_attributes
210210

instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_embeddings.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818

1919
import openai
2020
import pytest
21-
from opentelemetry.instrumentation.openai.helpers import GEN_AI_REQUEST_ENCODING_FORMAT
21+
from opentelemetry.instrumentation.openai.helpers import GEN_AI_REQUEST_ENCODING_FORMATS
2222
from opentelemetry.trace import SpanKind, StatusCode
2323
from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
2424
GEN_AI_OPERATION_NAME,
@@ -117,7 +117,7 @@ def test_all_the_client_options(provider_str, model, input_tokens, duration, tra
117117
GEN_AI_REQUEST_MODEL: model,
118118
GEN_AI_SYSTEM: "openai",
119119
GEN_AI_RESPONSE_MODEL: model,
120-
GEN_AI_REQUEST_ENCODING_FORMAT: "float",
120+
GEN_AI_REQUEST_ENCODING_FORMATS: ("float",),
121121
GEN_AI_USAGE_INPUT_TOKENS: input_tokens,
122122
SERVER_ADDRESS: provider.server_address,
123123
SERVER_PORT: provider.server_port,
@@ -159,7 +159,7 @@ def test_all_the_client_options_integration(provider_str, model, trace_exporter,
159159
GEN_AI_REQUEST_MODEL: model,
160160
GEN_AI_SYSTEM: "openai",
161161
GEN_AI_RESPONSE_MODEL: model,
162-
GEN_AI_REQUEST_ENCODING_FORMAT: "float",
162+
GEN_AI_REQUEST_ENCODING_FORMATS: ("float",),
163163
GEN_AI_USAGE_INPUT_TOKENS: response.usage.prompt_tokens,
164164
SERVER_ADDRESS: provider.server_address,
165165
SERVER_PORT: provider.server_port,
@@ -312,7 +312,7 @@ async def test_async_all_the_client_options(
312312
GEN_AI_REQUEST_MODEL: model,
313313
GEN_AI_SYSTEM: "openai",
314314
GEN_AI_RESPONSE_MODEL: model,
315-
GEN_AI_REQUEST_ENCODING_FORMAT: "float",
315+
GEN_AI_REQUEST_ENCODING_FORMATS: ("float",),
316316
GEN_AI_USAGE_INPUT_TOKENS: input_tokens,
317317
SERVER_ADDRESS: provider.server_address,
318318
SERVER_PORT: provider.server_port,
@@ -355,7 +355,7 @@ async def test_async_all_the_client_options_integration(provider_str, model, tra
355355
GEN_AI_REQUEST_MODEL: model,
356356
GEN_AI_SYSTEM: "openai",
357357
GEN_AI_RESPONSE_MODEL: model,
358-
GEN_AI_REQUEST_ENCODING_FORMAT: "float",
358+
GEN_AI_REQUEST_ENCODING_FORMATS: ("float",),
359359
GEN_AI_USAGE_INPUT_TOKENS: response.usage.prompt_tokens,
360360
SERVER_ADDRESS: provider.server_address,
361361
SERVER_PORT: provider.server_port,

0 commit comments

Comments
 (0)