Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 35 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,40 @@
# Changelog

## 2.34.0

### Various fixes & improvements

- Considerably raise `DEFAULT_MAX_VALUE_LENGTH` (#4632) by @sentrivana

We have increased the string trimming limit considerably, allowing you to see more data
without it being truncated. Note that this might, in rare cases, result in issues being
grouped differently, for example if you're capturing message events with very long
messages (longer than the previous default of 1024 characters/bytes).

If you want to adjust the limit, you can set a
[`max_value_length`](https://docs.sentry.io/platforms/python/configuration/options/#max_value_length)
in your `sentry_sdk.init()`.

- `OpenAI` integration update (#4612) by @antonpirker

The `OpenAIIntegration` now supports [OpenAI Responses API](https://platform.openai.com/docs/api-reference/responses).

The data captured will also show up in the new [AI Agents Dashboard](https://docs.sentry.io/product/insights/agents/dashboard/).

This works out of the box, nothing to do on your side.

- Expose `set_transaction_name` (#4634) by @sl0thentr0py
- Fix(Celery): Latency should be in milliseconds, not seconds (#4637) by @sentrivana
- Fix(Django): Treat `django.template.context.BasicContext` as sequence in serializer (#4621) by @sl0thentr0py
- Fix(Huggingface): Fix `huggingface_hub` CI tests. (#4619) by @antonpirker
- Fix: Ignore deliberate thread exception warnings (#4611) by @sl0thentr0py
- Fix: Socket tests to not use example.com (#4627) by @sl0thentr0py
- Fix: Threading run patch (#4610) by @sl0thentr0py
- Tests: Simplify celery double patching test (#4626) by @sl0thentr0py
- Tests: Remove remote example.com calls (#4622) by @sl0thentr0py
- Tests: tox.ini update (#4635) by @sentrivana
- Tests: Update tox (#4609) by @sentrivana

## 2.33.2

### Various fixes & improvements
Expand Down
2 changes: 1 addition & 1 deletion docs/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@
copyright = "2019-{}, Sentry Team and Contributors".format(datetime.now().year)
author = "Sentry Team and Contributors"

release = "2.33.2"
release = "2.34.0"
version = ".".join(release.split(".")[:2]) # The short X.Y version.


Expand Down
1 change: 1 addition & 0 deletions sentry_sdk/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@
"logger",
"start_session",
"end_session",
"set_transaction_name",
]

# Initialize the debug support after everything is loaded
Expand Down
6 changes: 3 additions & 3 deletions sentry_sdk/ai/monitoring.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def sync_wrapped(*args, **kwargs):
for k, v in kwargs.pop("sentry_data", {}).items():
span.set_data(k, v)
if curr_pipeline:
span.set_data(SPANDATA.AI_PIPELINE_NAME, curr_pipeline)
span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, curr_pipeline)
return f(*args, **kwargs)
else:
_ai_pipeline_name.set(description)
Expand Down Expand Up @@ -69,7 +69,7 @@ async def async_wrapped(*args, **kwargs):
for k, v in kwargs.pop("sentry_data", {}).items():
span.set_data(k, v)
if curr_pipeline:
span.set_data(SPANDATA.AI_PIPELINE_NAME, curr_pipeline)
span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, curr_pipeline)
return await f(*args, **kwargs)
else:
_ai_pipeline_name.set(description)
Expand Down Expand Up @@ -108,7 +108,7 @@ def record_token_usage(
# TODO: move pipeline name elsewhere
ai_pipeline_name = get_ai_pipeline_name()
if ai_pipeline_name:
span.set_data(SPANDATA.AI_PIPELINE_NAME, ai_pipeline_name)
span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, ai_pipeline_name)

if input_tokens is not None:
span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
Expand Down
7 changes: 7 additions & 0 deletions sentry_sdk/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,7 @@ def overload(x):
"monitor",
"start_session",
"end_session",
"set_transaction_name",
]


Expand Down Expand Up @@ -466,3 +467,9 @@ def start_session(
def end_session():
    # type: () -> None
    """End the session currently tracked on the isolation scope.

    Thin module-level convenience wrapper: delegates directly to
    ``Scope.end_session()`` on the current isolation scope.
    """
    return get_isolation_scope().end_session()


@scopemethod
def set_transaction_name(name, source=None):
    # type: (str, Optional[str]) -> None
    """Set the name of the current transaction.

    Thin module-level convenience wrapper: delegates to
    ``Scope.set_transaction_name()`` on the current scope.

    :param name: The new transaction name.
    :param source: Optional transaction source (how the name was
        determined, e.g. a route or URL); forwarded unchanged.
    """
    return get_current_scope().set_transaction_name(name, source)
33 changes: 28 additions & 5 deletions sentry_sdk/consts.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,10 @@
from typing import TYPE_CHECKING

# up top to prevent circular import due to integration import
DEFAULT_MAX_VALUE_LENGTH = 1024
# This is more or less an arbitrary large-ish value for now, so that we allow
# pretty long strings (like LLM prompts), but still have *some* upper limit
# until we verify that removing the trimming completely is safe.
DEFAULT_MAX_VALUE_LENGTH = 100_000

DEFAULT_MAX_STACK_FRAMES = 100
DEFAULT_ADD_FULL_STACK = False
Expand Down Expand Up @@ -169,6 +172,7 @@ class SPANDATA:
AI_PIPELINE_NAME = "ai.pipeline.name"
"""
Name of the AI pipeline or chain being executed.
DEPRECATED: Use GEN_AI_PIPELINE_NAME instead.
Example: "qa-pipeline"
"""

Expand Down Expand Up @@ -229,6 +233,7 @@ class SPANDATA:
AI_STREAMING = "ai.streaming"
"""
Whether or not the AI model call's response was streamed back asynchronously
DEPRECATED: Use GEN_AI_RESPONSE_STREAMING instead.
Example: true
"""

Expand Down Expand Up @@ -372,6 +377,24 @@ class SPANDATA:
Example: "chat"
"""

GEN_AI_PIPELINE_NAME = "gen_ai.pipeline.name"
"""
Name of the AI pipeline or chain being executed.
Example: "qa-pipeline"
"""

GEN_AI_RESPONSE_MODEL = "gen_ai.response.model"
"""
Exact model identifier used to generate the response
Example: gpt-4o-mini-2024-07-18
"""

GEN_AI_RESPONSE_STREAMING = "gen_ai.response.streaming"
"""
Whether or not the AI model call's response was streamed back asynchronously
Example: true
"""

GEN_AI_RESPONSE_TEXT = "gen_ai.response.text"
"""
The model's response text messages.
Expand Down Expand Up @@ -411,7 +434,7 @@ class SPANDATA:
GEN_AI_REQUEST_MODEL = "gen_ai.request.model"
"""
The model identifier being used for the request.
Example: "gpt-4-turbo-preview"
Example: "gpt-4-turbo"
"""

GEN_AI_REQUEST_PRESENCE_PENALTY = "gen_ai.request.presence_penalty"
Expand Down Expand Up @@ -649,9 +672,11 @@ class OP:
FUNCTION_AWS = "function.aws"
FUNCTION_GCP = "function.gcp"
GEN_AI_CHAT = "gen_ai.chat"
GEN_AI_EMBEDDINGS = "gen_ai.embeddings"
GEN_AI_EXECUTE_TOOL = "gen_ai.execute_tool"
GEN_AI_HANDOFF = "gen_ai.handoff"
GEN_AI_INVOKE_AGENT = "gen_ai.invoke_agent"
GEN_AI_RESPONSES = "gen_ai.responses"
GRAPHQL_EXECUTE = "graphql.execute"
GRAPHQL_MUTATION = "graphql.mutation"
GRAPHQL_PARSE = "graphql.parse"
Expand All @@ -674,8 +699,6 @@ class OP:
MIDDLEWARE_STARLITE = "middleware.starlite"
MIDDLEWARE_STARLITE_RECEIVE = "middleware.starlite.receive"
MIDDLEWARE_STARLITE_SEND = "middleware.starlite.send"
OPENAI_CHAT_COMPLETIONS_CREATE = "ai.chat_completions.create.openai"
OPENAI_EMBEDDINGS_CREATE = "ai.embeddings.create.openai"
HUGGINGFACE_HUB_CHAT_COMPLETIONS_CREATE = (
"ai.chat_completions.create.huggingface_hub"
)
Expand Down Expand Up @@ -1181,4 +1204,4 @@ def _get_default_options():
del _get_default_options


VERSION = "2.33.2"
VERSION = "2.34.0"
1 change: 1 addition & 0 deletions sentry_sdk/integrations/celery/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -391,6 +391,7 @@ def _inner(*args, **kwargs):
)

if latency is not None:
latency *= 1000 # milliseconds
span.set_data(SPANDATA.MESSAGING_MESSAGE_RECEIVE_LATENCY, latency)

with capture_internal_exceptions():
Expand Down
Loading
Loading