Skip to content
Merged
Show file tree
Hide file tree
Changes from 29 commits
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
f2edb38
Fixed span names to be otel compatible
antonpirker Jul 23, 2025
a29716b
Add common otel attributes
antonpirker Jul 23, 2025
5b11b7f
attributes on chat completion
antonpirker Jul 23, 2025
2f04a23
organize code
antonpirker Jul 23, 2025
6b2f5de
refactor embeddings
antonpirker Jul 23, 2025
ac4f406
cleanup responses
antonpirker Jul 23, 2025
b54b8dc
Add available tools
antonpirker Jul 23, 2025
1bbb772
updated some tests
antonpirker Jul 23, 2025
c80a413
fixed tests
antonpirker Jul 24, 2025
3ad5e50
cleanup
antonpirker Jul 24, 2025
dddaab2
fix token count for streaming responses api
antonpirker Jul 24, 2025
a83fca9
fixed streaming responses token count
antonpirker Jul 24, 2025
d291a86
Merge branch 'antonpirker/openai-overhaul' into antonpirker/openai-ot…
antonpirker Jul 25, 2025
c589030
typing
antonpirker Jul 25, 2025
45300bf
typing
antonpirker Jul 25, 2025
b5dd115
More tests
antonpirker Jul 25, 2025
2c5d1f0
import ordering
antonpirker Jul 25, 2025
f629b0d
tests
antonpirker Jul 25, 2025
d76a563
tests
antonpirker Jul 25, 2025
f9dfe5e
Rename pipeline name
antonpirker Jul 28, 2025
14baf6c
Rename streaming attribute
antonpirker Jul 28, 2025
f2cab2d
Apply suggestions from code review
antonpirker Jul 28, 2025
d13a5d0
review feedback
antonpirker Jul 28, 2025
157f95a
Review feedback
antonpirker Jul 28, 2025
0aa26eb
resilience
antonpirker Jul 28, 2025
e04174e
streaming
antonpirker Jul 28, 2025
eb629de
do not calculate token usage twice
antonpirker Jul 28, 2025
477a03c
Merge branch 'antonpirker/openai-otel-2' into antonpirker/openai-pipe…
antonpirker Jul 28, 2025
1f4ed0c
updated test
antonpirker Jul 28, 2025
bd4250e
Merge branch 'antonpirker/openai-overhaul' into antonpirker/openai-pi…
antonpirker Jul 29, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions sentry_sdk/ai/monitoring.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def sync_wrapped(*args, **kwargs):
for k, v in kwargs.pop("sentry_data", {}).items():
span.set_data(k, v)
if curr_pipeline:
span.set_data(SPANDATA.AI_PIPELINE_NAME, curr_pipeline)
span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, curr_pipeline)
return f(*args, **kwargs)
else:
_ai_pipeline_name.set(description)
Expand Down Expand Up @@ -69,7 +69,7 @@ async def async_wrapped(*args, **kwargs):
for k, v in kwargs.pop("sentry_data", {}).items():
span.set_data(k, v)
if curr_pipeline:
span.set_data(SPANDATA.AI_PIPELINE_NAME, curr_pipeline)
span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, curr_pipeline)
return await f(*args, **kwargs)
else:
_ai_pipeline_name.set(description)
Expand Down Expand Up @@ -108,7 +108,7 @@ def record_token_usage(
# TODO: move pipeline name elsewhere
ai_pipeline_name = get_ai_pipeline_name()
if ai_pipeline_name:
span.set_data(SPANDATA.AI_PIPELINE_NAME, ai_pipeline_name)
span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, ai_pipeline_name)

if input_tokens is not None:
span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
Expand Down
16 changes: 15 additions & 1 deletion sentry_sdk/consts.py
Original file line number Diff line number Diff line change
Expand Up @@ -169,6 +169,7 @@ class SPANDATA:
AI_PIPELINE_NAME = "ai.pipeline.name"
"""
Name of the AI pipeline or chain being executed.
DEPRECATED: Use GEN_AI_PIPELINE_NAME instead.
Example: "qa-pipeline"
"""

Expand Down Expand Up @@ -229,6 +230,7 @@ class SPANDATA:
AI_STREAMING = "ai.streaming"
"""
Whether or not the AI model call's response was streamed back asynchronously
DEPRECATED: Use GEN_AI_RESPONSE_STREAMING instead.
Example: true
"""

Expand Down Expand Up @@ -372,12 +374,24 @@ class SPANDATA:
Example: "chat"
"""

GEN_AI_PIPELINE_NAME = "gen_ai.pipeline.name"
"""
Name of the AI pipeline or chain being executed.
Example: "qa-pipeline"
"""

GEN_AI_RESPONSE_MODEL = "gen_ai.response.model"
"""
Exact model identifier used to generate the response
Example: gpt-4o-mini-2024-07-18
"""

GEN_AI_RESPONSE_STREAMING = "gen_ai.response.streaming"
"""
Whether or not the AI model call's response was streamed back asynchronously
Example: true
"""

GEN_AI_RESPONSE_TEXT = "gen_ai.response.text"
"""
The model's response text messages.
Expand Down Expand Up @@ -417,7 +431,7 @@ class SPANDATA:
GEN_AI_REQUEST_MODEL = "gen_ai.request.model"
"""
The model identifier being used for the request.
Example: "gpt-4-turbo-preview"
Example: "gpt-4-turbo"
"""

GEN_AI_REQUEST_PRESENCE_PENALTY = "gen_ai.request.presence_penalty"
Expand Down
Loading
Loading