Skip to content

Commit 4a6c239

Browse files
committed
Merge branch 'master' into potel-base
2 parents 65eaf38 + c273f67 commit 4a6c239

File tree

18 files changed

+1156
-306
lines changed

18 files changed

+1156
-306
lines changed

CHANGELOG.md

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,41 @@ for your feedback. How was the migration? Is everything working as expected? Is
2020
[on GitHub](https://github.com/getsentry/sentry-python/discussions/3936) or
2121
[on Discord](https://discord.com/invite/Ww9hbqr).
2222

23+
## 2.34.0
24+
25+
### Various fixes & improvements
26+
27+
- Considerably raise `DEFAULT_MAX_VALUE_LENGTH` (#4632) by @sentrivana
28+
29+
We have increased the string trimming limit considerably, allowing you to see more data
30+
without it being truncated. Note that this might, in rare cases, result in issue regrouping,
31+
for example if you're capturing message events with very long messages (longer than the
32+
default 1024 characters/bytes).
33+
34+
If you want to adjust the limit, you can set a
35+
[`max_value_length`](https://docs.sentry.io/platforms/python/configuration/options/#max_value_length)
36+
in your `sentry_sdk.init()`.
37+
38+
- `OpenAI` integration update (#4612) by @antonpirker
39+
40+
The `OpenAIIntegration` now supports [OpenAI Responses API](https://platform.openai.com/docs/api-reference/responses).
41+
42+
The data captured will also show up in the new [AI Agents Dashboard](https://docs.sentry.io/product/insights/agents/dashboard/).
43+
44+
This works out of the box, nothing to do on your side.
45+
46+
- Expose `set_transaction_name` (#4634) by @sl0thentr0py
47+
- Fix(Celery): Latency should be in milliseconds, not seconds (#4637) by @sentrivana
48+
- Fix(Django): Treat `django.template.context.BasicContext` as sequence in serializer (#4621) by @sl0thentr0py
49+
- Fix(Huggingface): Fix `huggingface_hub` CI tests. (#4619) by @antonpirker
50+
- Fix: Ignore deliberate thread exception warnings (#4611) by @sl0thentr0py
51+
- Fix: Socket tests to not use example.com (#4627) by @sl0thentr0py
52+
- Fix: Threading run patch (#4610) by @sl0thentr0py
53+
- Tests: Simplify celery double patching test (#4626) by @sl0thentr0py
54+
- Tests: Remove remote example.com calls (#4622) by @sl0thentr0py
55+
- Tests: tox.ini update (#4635) by @sentrivana
56+
- Tests: Update tox (#4609) by @sentrivana
57+
2358
## 2.33.2
2459

2560
### Various fixes & improvements

sentry_sdk/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@
4747
"logger",
4848
"start_session",
4949
"end_session",
50+
"set_transaction_name",
5051
]
5152

5253
# Initialize the debug support after everything is loaded

sentry_sdk/ai/monitoring.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ def sync_wrapped(*args: Any, **kwargs: Any) -> Any:
3838
for k, v in kwargs.pop("sentry_data", {}).items():
3939
span.set_attribute(k, v)
4040
if curr_pipeline:
41-
span.set_attribute(SPANDATA.AI_PIPELINE_NAME, curr_pipeline)
41+
span.set_attribute(SPANDATA.GEN_AI_PIPELINE_NAME, curr_pipeline)
4242
return f(*args, **kwargs)
4343
else:
4444
_ai_pipeline_name.set(description)
@@ -68,7 +68,7 @@ async def async_wrapped(*args: Any, **kwargs: Any) -> Any:
6868
for k, v in kwargs.pop("sentry_data", {}).items():
6969
span.set_attribute(k, v)
7070
if curr_pipeline:
71-
span.set_attribute(SPANDATA.AI_PIPELINE_NAME, curr_pipeline)
71+
span.set_attribute(SPANDATA.GEN_AI_PIPELINE_NAME, curr_pipeline)
7272
return await f(*args, **kwargs)
7373
else:
7474
_ai_pipeline_name.set(description)
@@ -105,7 +105,7 @@ def record_token_usage(
105105
# TODO: move pipeline name elsewhere
106106
ai_pipeline_name = get_ai_pipeline_name()
107107
if ai_pipeline_name:
108-
span.set_attribute(SPANDATA.AI_PIPELINE_NAME, ai_pipeline_name)
108+
span.set_attribute(SPANDATA.GEN_AI_PIPELINE_NAME, ai_pipeline_name)
109109

110110
if input_tokens is not None:
111111
span.set_attribute(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)

sentry_sdk/api.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,7 @@
7474
"use_isolation_scope",
7575
"start_session",
7676
"end_session",
77+
"set_transaction_name",
7778
]
7879

7980

@@ -325,3 +326,9 @@ def start_session(
325326
@scopemethod
326327
def end_session() -> None:
327328
return get_isolation_scope().end_session()
329+
330+
331+
@scopemethod
332+
def set_transaction_name(name, source=None):
333+
# type: (str, Optional[str]) -> None
334+
return get_current_scope().set_transaction_name(name, source)

sentry_sdk/consts.py

Lines changed: 27 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,10 @@
1717
)
1818

1919
# up top to prevent circular import due to integration import
20-
DEFAULT_MAX_VALUE_LENGTH = 1024
20+
# This is more or less an arbitrary large-ish value for now, so that we allow
21+
# pretty long strings (like LLM prompts), but still have *some* upper limit
22+
# until we verify that removing the trimming completely is safe.
23+
DEFAULT_MAX_VALUE_LENGTH = 100_000
2124

2225
DEFAULT_MAX_STACK_FRAMES = 100
2326
DEFAULT_ADD_FULL_STACK = False
@@ -167,6 +170,7 @@ class SPANDATA:
167170
AI_PIPELINE_NAME = "ai.pipeline.name"
168171
"""
169172
Name of the AI pipeline or chain being executed.
173+
DEPRECATED: Use GEN_AI_PIPELINE_NAME instead.
170174
Example: "qa-pipeline"
171175
"""
172176

@@ -233,6 +237,7 @@ class SPANDATA:
233237
AI_STREAMING = "ai.streaming"
234238
"""
235239
Whether or not the AI model call's response was streamed back asynchronously
240+
DEPRECATED: Use GEN_AI_RESPONSE_STREAMING instead.
236241
Example: true
237242
"""
238243

@@ -382,6 +387,24 @@ class SPANDATA:
382387
Example: "chat"
383388
"""
384389

390+
GEN_AI_PIPELINE_NAME = "gen_ai.pipeline.name"
391+
"""
392+
Name of the AI pipeline or chain being executed.
393+
Example: "qa-pipeline"
394+
"""
395+
396+
GEN_AI_RESPONSE_MODEL = "gen_ai.response.model"
397+
"""
398+
Exact model identifier used to generate the response
399+
Example: gpt-4o-mini-2024-07-18
400+
"""
401+
402+
GEN_AI_RESPONSE_STREAMING = "gen_ai.response.streaming"
403+
"""
404+
Whether or not the AI model call's response was streamed back asynchronously
405+
Example: true
406+
"""
407+
385408
GEN_AI_RESPONSE_TEXT = "gen_ai.response.text"
386409
"""
387410
The model's response text messages.
@@ -421,7 +444,7 @@ class SPANDATA:
421444
GEN_AI_REQUEST_MODEL = "gen_ai.request.model"
422445
"""
423446
The model identifier being used for the request.
424-
Example: "gpt-4-turbo-preview"
447+
Example: "gpt-4-turbo"
425448
"""
426449

427450
GEN_AI_REQUEST_PRESENCE_PENALTY = "gen_ai.request.presence_penalty"
@@ -659,9 +682,11 @@ class OP:
659682
FUNCTION_AWS = "function.aws"
660683
FUNCTION_GCP = "function.gcp"
661684
GEN_AI_CHAT = "gen_ai.chat"
685+
GEN_AI_EMBEDDINGS = "gen_ai.embeddings"
662686
GEN_AI_EXECUTE_TOOL = "gen_ai.execute_tool"
663687
GEN_AI_HANDOFF = "gen_ai.handoff"
664688
GEN_AI_INVOKE_AGENT = "gen_ai.invoke_agent"
689+
GEN_AI_RESPONSES = "gen_ai.responses"
665690
GRAPHQL_EXECUTE = "graphql.execute"
666691
GRAPHQL_MUTATION = "graphql.mutation"
667692
GRAPHQL_PARSE = "graphql.parse"
@@ -686,8 +711,6 @@ class OP:
686711
MIDDLEWARE_STARLITE = "middleware.starlite"
687712
MIDDLEWARE_STARLITE_RECEIVE = "middleware.starlite.receive"
688713
MIDDLEWARE_STARLITE_SEND = "middleware.starlite.send"
689-
OPENAI_CHAT_COMPLETIONS_CREATE = "ai.chat_completions.create.openai"
690-
OPENAI_EMBEDDINGS_CREATE = "ai.embeddings.create.openai"
691714
HUGGINGFACE_HUB_CHAT_COMPLETIONS_CREATE = (
692715
"ai.chat_completions.create.huggingface_hub"
693716
)

sentry_sdk/integrations/celery/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -369,6 +369,7 @@ def _inner(*args: Any, **kwargs: Any) -> Any:
369369
)
370370

371371
if latency is not None:
372+
latency *= 1000 # milliseconds
372373
span.set_attribute(
373374
SPANDATA.MESSAGING_MESSAGE_RECEIVE_LATENCY, latency
374375
)

0 commit comments

Comments
 (0)