@@ -7,6 +7,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

 ## Unreleased
 
+- Record prompt and completion events regardless of span sampling decision.
+  ([#3226](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3226))
+
 ## Version 2.1b0 (2025-01-18)
 
 - Coerce openai response_format to semconv format
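In practice, the instrumentation diff below moves prompt/completion event emission out of the `span.is_recording()` guard, so events still reach the event logger when the span is sampled out, while span attributes remain guarded. A minimal sketch of that behavior, assuming an SDK configured with an always-off sampler (the setup and tracer name are illustrative, not part of this PR):

```python
# With an always-off sampler every span is sampled out: attribute
# writes become no-ops, but nothing prevents event/log emission.
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.sampling import ALWAYS_OFF

provider = TracerProvider(sampler=ALWAYS_OFF)
tracer = provider.get_tracer("demo")  # illustrative instrumentation name

with tracer.start_as_current_span("chat gpt-4") as span:
    assert not span.is_recording()
    # Previously the instrumentation emitted gen_ai events only inside
    # `if span.is_recording():`; with this change they are emitted
    # unconditionally through the event logger.
```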
@@ -56,11 +56,8 @@ def traced_method(wrapped, instance, args, kwargs):
             attributes=span_attributes,
             end_on_exit=False,
         ) as span:
-            if span.is_recording():
-                for message in kwargs.get("messages", []):
-                    event_logger.emit(
-                        message_to_event(message, capture_content)
-                    )
+            for message in kwargs.get("messages", []):
+                event_logger.emit(message_to_event(message, capture_content))
 
             start = default_timer()
             result = None
@@ -76,6 +73,9 @@ def traced_method(wrapped, instance, args, kwargs):
                     _set_response_attributes(
                         span, result, event_logger, capture_content
                     )
+                for choice in getattr(result, "choices", []):
+                    event_logger.emit(choice_to_event(choice, capture_content))
+
                 span.end()
                 return result
 
@@ -114,11 +114,8 @@ async def traced_method(wrapped, instance, args, kwargs):
             attributes=span_attributes,
             end_on_exit=False,
         ) as span:
-            if span.is_recording():
-                for message in kwargs.get("messages", []):
-                    event_logger.emit(
-                        message_to_event(message, capture_content)
-                    )
+            for message in kwargs.get("messages", []):
+                event_logger.emit(message_to_event(message, capture_content))
 
             start = default_timer()
             result = None
@@ -134,6 +131,9 @@ async def traced_method(wrapped, instance, args, kwargs):
                     _set_response_attributes(
                         span, result, event_logger, capture_content
                     )
+                for choice in getattr(result, "choices", []):
+                    event_logger.emit(choice_to_event(choice, capture_content))
+
                 span.end()
                 return result
 
@@ -228,12 +228,8 @@ def _set_response_attributes(
     )
 
     if getattr(result, "choices", None):
-        choices = result.choices
-        for choice in choices:
-            event_logger.emit(choice_to_event(choice, capture_content))
-
         finish_reasons = []
-        for choice in choices:
+        for choice in result.choices:
             finish_reasons.append(choice.finish_reason or "error")
 
         set_span_attribute(
@@ -333,42 +329,43 @@ def setup(self):

     def cleanup(self):
         if self._span_started:
-            if self.response_model:
-                set_span_attribute(
-                    self.span,
-                    GenAIAttributes.GEN_AI_RESPONSE_MODEL,
-                    self.response_model,
-                )
+            if self.span.is_recording():
+                if self.response_model:
+                    set_span_attribute(
+                        self.span,
+                        GenAIAttributes.GEN_AI_RESPONSE_MODEL,
+                        self.response_model,
+                    )
 
-            if self.response_id:
-                set_span_attribute(
-                    self.span,
-                    GenAIAttributes.GEN_AI_RESPONSE_ID,
-                    self.response_id,
-                )
+                if self.response_id:
+                    set_span_attribute(
+                        self.span,
+                        GenAIAttributes.GEN_AI_RESPONSE_ID,
+                        self.response_id,
+                    )
 
-            set_span_attribute(
-                self.span,
-                GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS,
-                self.prompt_tokens,
-            )
-            set_span_attribute(
-                self.span,
-                GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
-                self.completion_tokens,
-            )
+                set_span_attribute(
+                    self.span,
+                    GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS,
+                    self.prompt_tokens,
+                )
+                set_span_attribute(
+                    self.span,
+                    GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
+                    self.completion_tokens,
+                )
 
-            set_span_attribute(
-                self.span,
-                GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
-                self.service_tier,
-            )
+                set_span_attribute(
+                    self.span,
+                    GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
+                    self.service_tier,
+                )
 
-            set_span_attribute(
-                self.span,
-                GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS,
-                self.finish_reasons,
-            )
+                set_span_attribute(
+                    self.span,
+                    GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS,
+                    self.finish_reasons,
+                )
 
             for idx, choice in enumerate(self.choice_buffers):
                 message = {"role": "assistant"}
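The two new files below are recorded HTTP cassettes covering the streaming (gpt-4, `stream: true`) and non-streaming (gpt-4o-mini) paths, which the tests replay offline. A hedged sketch of how a test might consume such a cassette and assert on the emitted events; the fixture names (`openai_client`, `log_exporter`) and the pytest-recording marker are assumptions, not copied from this repo's suite:

```python
import pytest

@pytest.mark.vcr()  # replay the recorded interaction instead of calling the API
def test_chat_completion_emits_events(openai_client, log_exporter):
    # Assumed fixtures: an instrumented OpenAI client and an in-memory
    # log exporter wired into the event logger provider.
    openai_client.chat.completions.create(
        messages=[{"role": "user", "content": "Say this is a test"}],
        model="gpt-4o-mini",
        stream=False,
    )
    logs = log_exporter.get_finished_logs()
    # One user-message event plus one choice event for the single completion.
    assert len(logs) == 2
```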
@@ -0,0 +1,117 @@
interactions:
- request:
body: |-
{
"messages": [
{
"role": "user",
"content": "Say this is a test"
}
],
"model": "gpt-4",
"stream": true,
"stream_options": {
"include_usage": true
}
}
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
authorization:
- Bearer test_openai_api_key
connection:
- keep-alive
content-length:
- '142'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- AsyncOpenAI/Python 1.26.0
x-stainless-arch:
- arm64
x-stainless-async:
- async:asyncio
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.26.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.5
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: |+
data: {"id":"chatcmpl-ASv9ejXDUtAhGOJJxWuw026zdinc4","object":"chat.completion.chunk","created":1731456250,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null}

data: {"id":"chatcmpl-ASv9ejXDUtAhGOJJxWuw026zdinc4","object":"chat.completion.chunk","created":1731456250,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"This"},"logprobs":null,"finish_reason":null}],"usage":null}

data: {"id":"chatcmpl-ASv9ejXDUtAhGOJJxWuw026zdinc4","object":"chat.completion.chunk","created":1731456250,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" is"},"logprobs":null,"finish_reason":null}],"usage":null}

data: {"id":"chatcmpl-ASv9ejXDUtAhGOJJxWuw026zdinc4","object":"chat.completion.chunk","created":1731456250,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}],"usage":null}

data: {"id":"chatcmpl-ASv9ejXDUtAhGOJJxWuw026zdinc4","object":"chat.completion.chunk","created":1731456250,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" test"},"logprobs":null,"finish_reason":null}],"usage":null}

data: {"id":"chatcmpl-ASv9ejXDUtAhGOJJxWuw026zdinc4","object":"chat.completion.chunk","created":1731456250,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}],"usage":null}

data: {"id":"chatcmpl-ASv9ejXDUtAhGOJJxWuw026zdinc4","object":"chat.completion.chunk","created":1731456250,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null}

data: {"id":"chatcmpl-ASv9ejXDUtAhGOJJxWuw026zdinc4","object":"chat.completion.chunk","created":1731456250,"model":"gpt-4-0613","system_fingerprint":null,"choices":[],"usage":{"prompt_tokens":12,"completion_tokens":5,"total_tokens":17,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}}}

data: [DONE]

headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8e1a80bd2f31e1e5-MRS
Connection:
- keep-alive
Content-Type:
- text/event-stream; charset=utf-8
Date:
- Wed, 13 Nov 2024 00:04:11 GMT
Server:
- cloudflare
Set-Cookie: test_set_cookie
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization: test_openai_org_id
openai-processing-ms:
- '196'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '1000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '999977'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 1ms
x-request-id:
- req_cc9204ae23338b130df11c8c5b5f31af
status:
code: 200
message: OK
version: 1
@@ -0,0 +1,134 @@
interactions:
- request:
body: |-
{
"messages": [
{
"role": "user",
"content": "Say this is a test"
}
],
"model": "gpt-4o-mini",
"stream": false
}
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
authorization:
- Bearer test_openai_api_key
connection:
- keep-alive
content-length:
- '106'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.54.3
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.54.3
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.6
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: |-
{
"id": "chatcmpl-ASYMQRl3A3DXL9FWCK9tnGRcKIO7q",
"object": "chat.completion",
"created": 1731368630,
"model": "gpt-4o-mini-2024-07-18",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "This is a test.",
"refusal": null
},
"logprobs": null,
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 12,
"completion_tokens": 5,
"total_tokens": 17,
"prompt_tokens_details": {
"cached_tokens": 0,
"audio_tokens": 0
},
"completion_tokens_details": {
"reasoning_tokens": 0,
"audio_tokens": 0,
"accepted_prediction_tokens": 0,
"rejected_prediction_tokens": 0
}
},
"system_fingerprint": "fp_0ba0d124f1"
}
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8e122593ff368bc8-SIN
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Mon, 11 Nov 2024 23:43:50 GMT
Server:
- cloudflare
Set-Cookie: test_set_cookie
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
content-length:
- '765'
openai-organization: test_openai_org_id
openai-processing-ms:
- '287'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '200000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '199977'
x-ratelimit-reset-requests:
- 8.64s
x-ratelimit-reset-tokens:
- 6ms
x-request-id:
- req_58cff97afd0e7c0bba910ccf0b044a6f
status:
code: 200
message: OK
version: 1