Skip to content

Commit 9e9314d

Browse files
estolfo and xrmx authored
Add gen_ai.request.choice.count to openai instrumentation (elastic#75)
* Add gen_ai.request.choice.count to openai instrumentation

* Add CHANGELOG entry

* Fix formatting

* CHANGELOG is updated when there's a release, remove CHANGELOG entry

* Only set choice count if n != 1

* Update instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_chat_completions.py

Co-authored-by: Riccardo Magliocchetti <[email protected]>

---------

Co-authored-by: Riccardo Magliocchetti <[email protected]>
1 parent c608fb6 commit 9e9314d

File tree

4 files changed

+167
-0
lines changed

4 files changed

+167
-0
lines changed

instrumentation/elastic-opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/helpers.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
2626
GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
2727
GEN_AI_OPERATION_NAME,
28+
GEN_AI_REQUEST_CHOICE_COUNT,
2829
GEN_AI_REQUEST_FREQUENCY_PENALTY,
2930
GEN_AI_REQUEST_MAX_TOKENS,
3031
GEN_AI_REQUEST_MODEL,
@@ -141,6 +142,8 @@ def _is_set(value):
141142
if client := getattr(instance, "_client", None):
142143
span_attributes.update(_attributes_from_client(client))
143144

145+
if _is_set(choice_count := kwargs.get("n")) and choice_count != 1:
146+
span_attributes[GEN_AI_REQUEST_CHOICE_COUNT] = choice_count
144147
if _is_set(frequency_penalty := kwargs.get("frequency_penalty")):
145148
span_attributes[GEN_AI_REQUEST_FREQUENCY_PENALTY] = frequency_penalty
146149
if _is_set(max_tokens := kwargs.get("max_completion_tokens", kwargs.get("max_tokens"))):
Lines changed: 138 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,138 @@
1+
interactions:
2+
- request:
3+
body: |-
4+
{
5+
"messages": [
6+
{
7+
"role": "user",
8+
"content": "Answer in up to 3 words: Which ocean contains Bouvet Island?"
9+
}
10+
],
11+
"model": "gpt-4o-mini",
12+
"n": 1
13+
}
14+
headers:
15+
accept:
16+
- application/json
17+
accept-encoding:
18+
- gzip, deflate, zstd
19+
authorization:
20+
- Bearer test_openai_api_key
21+
connection:
22+
- keep-alive
23+
content-length:
24+
- '139'
25+
content-type:
26+
- application/json
27+
host:
28+
- api.openai.com
29+
user-agent:
30+
- OpenAI/Python 1.66.5
31+
x-stainless-arch:
32+
- arm64
33+
x-stainless-async:
34+
- 'false'
35+
x-stainless-lang:
36+
- python
37+
x-stainless-os:
38+
- MacOS
39+
x-stainless-package-version:
40+
- 1.66.5
41+
x-stainless-read-timeout:
42+
- '600'
43+
x-stainless-retry-count:
44+
- '0'
45+
x-stainless-runtime:
46+
- CPython
47+
x-stainless-runtime-version:
48+
- 3.12.0
49+
method: POST
50+
uri: https://api.openai.com/v1/chat/completions
51+
response:
52+
body:
53+
string: |-
54+
{
55+
"id": "chatcmpl-BL8K8arBjCHMDOxqQd5YGBeYphZGG",
56+
"object": "chat.completion",
57+
"created": 1744376584,
58+
"model": "gpt-4o-mini-2024-07-18",
59+
"choices": [
60+
{
61+
"index": 0,
62+
"message": {
63+
"role": "assistant",
64+
"content": "Atlantic Ocean.",
65+
"refusal": null,
66+
"annotations": []
67+
},
68+
"logprobs": null,
69+
"finish_reason": "stop"
70+
}
71+
],
72+
"usage": {
73+
"prompt_tokens": 22,
74+
"completion_tokens": 5,
75+
"total_tokens": 27,
76+
"prompt_tokens_details": {
77+
"cached_tokens": 0,
78+
"audio_tokens": 0
79+
},
80+
"completion_tokens_details": {
81+
"reasoning_tokens": 0,
82+
"audio_tokens": 0,
83+
"accepted_prediction_tokens": 0,
84+
"rejected_prediction_tokens": 0
85+
}
86+
},
87+
"service_tier": "default",
88+
"system_fingerprint": "fp_44added55e"
89+
}
90+
headers:
91+
CF-RAY:
92+
- 92eaae915b15e51d-TXL
93+
Connection:
94+
- keep-alive
95+
Content-Type:
96+
- application/json
97+
Date:
98+
- Fri, 11 Apr 2025 13:03:04 GMT
99+
Server:
100+
- cloudflare
101+
Set-Cookie: test_set_cookie
102+
Transfer-Encoding:
103+
- chunked
104+
X-Content-Type-Options:
105+
- nosniff
106+
access-control-expose-headers:
107+
- X-Request-ID
108+
alt-svc:
109+
- h3=":443"; ma=86400
110+
cf-cache-status:
111+
- DYNAMIC
112+
content-length:
113+
- '827'
114+
openai-organization: test_openai_org_id
115+
openai-processing-ms:
116+
- '170'
117+
openai-version:
118+
- '2020-10-01'
119+
strict-transport-security:
120+
- max-age=31536000; includeSubDomains; preload
121+
x-ratelimit-limit-requests:
122+
- '200'
123+
x-ratelimit-limit-tokens:
124+
- '100000'
125+
x-ratelimit-remaining-requests:
126+
- '197'
127+
x-ratelimit-remaining-tokens:
128+
- '99925'
129+
x-ratelimit-reset-requests:
130+
- 19m29.225s
131+
x-ratelimit-reset-tokens:
132+
- 32m9.545s
133+
x-request-id:
134+
- req_5ec52b920fea0d555ec3dbf813300fad
135+
status:
136+
code: 200
137+
message: OK
138+
version: 1

instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_beta_chat_completions.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@
3333
GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
3434
GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
3535
GEN_AI_OPERATION_NAME,
36+
GEN_AI_REQUEST_CHOICE_COUNT,
3637
GEN_AI_REQUEST_FREQUENCY_PENALTY,
3738
GEN_AI_REQUEST_MAX_TOKENS,
3839
GEN_AI_REQUEST_MODEL,
@@ -330,6 +331,7 @@ def test_chat_multiple_choices_with_capture_message_content(
330331
address, port = address_and_port(client)
331332
assert dict(span.attributes) == {
332333
GEN_AI_OPERATION_NAME: "chat",
334+
GEN_AI_REQUEST_CHOICE_COUNT: 2,
333335
GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
334336
GEN_AI_SYSTEM: "openai",
335337
GEN_AI_RESPONSE_ID: "chatcmpl-AfhuHpVEbcYGlsFuHOP60MtU4tIq9",

instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_chat_completions.py

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@
3232
GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
3333
GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
3434
GEN_AI_OPERATION_NAME,
35+
GEN_AI_REQUEST_CHOICE_COUNT,
3536
GEN_AI_REQUEST_FREQUENCY_PENALTY,
3637
GEN_AI_REQUEST_MAX_TOKENS,
3738
GEN_AI_REQUEST_MODEL,
@@ -130,6 +131,28 @@ def test_chat(default_openai_env, trace_exporter, metrics_reader, logs_exporter)
130131
)
131132

132133

134+
@pytest.mark.vcr()
135+
def test_chat_n_1(default_openai_env, trace_exporter, metrics_reader, logs_exporter):
136+
client = openai.OpenAI()
137+
138+
messages = [
139+
{
140+
"role": "user",
141+
"content": TEST_CHAT_INPUT,
142+
}
143+
]
144+
145+
chat_completion = client.chat.completions.create(model=TEST_CHAT_MODEL, messages=messages, n=1)
146+
147+
assert chat_completion.choices[0].message.content == "Atlantic Ocean."
148+
149+
spans = trace_exporter.get_finished_spans()
150+
assert len(spans) == 1
151+
152+
span = spans[0]
153+
assert GEN_AI_REQUEST_CHOICE_COUNT not in span.attributes
154+
155+
133156
@pytest.mark.skipif(OPENAI_VERSION < (1, 8, 0), reason="LegacyAPIResponse available")
134157
@pytest.mark.vcr()
135158
def test_chat_with_raw_response(default_openai_env, trace_exporter, metrics_reader, logs_exporter):
@@ -471,6 +494,7 @@ def test_chat_multiple_choices_with_capture_message_content(
471494
address, port = address_and_port(client)
472495
assert dict(span.attributes) == {
473496
GEN_AI_OPERATION_NAME: "chat",
497+
GEN_AI_REQUEST_CHOICE_COUNT: 2,
474498
GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
475499
GEN_AI_SYSTEM: "openai",
476500
GEN_AI_RESPONSE_ID: "chatcmpl-AfhuHpVEbcYGlsFuHOP60MtU4tIq9",

0 commit comments

Comments (0)