
Commit fb1bc57

feat(langchain): update integration to use gen_ai.* instead of ai.*
1 parent 19914cd · commit fb1bc57

File tree

4 files changed: +61 -32 lines

  sentry_sdk/ai/monitoring.py
  sentry_sdk/consts.py
  sentry_sdk/integrations/langchain.py
  tests/integrations/langchain/test_langchain.py

sentry_sdk/ai/monitoring.py

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@
 if TYPE_CHECKING:
     from typing import Optional, Callable, Any

-_ai_pipeline_name = ContextVar("ai_pipeline_name", default=None)
+_ai_pipeline_name = ContextVar(SPANDATA.GEN_AI_PIPELINE_NAME, default=None)


 def set_ai_pipeline_name(name):
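The renamed ContextVar behaves exactly as before; only its label changes. A minimal sketch of the resulting behavior, assuming SPANDATA.GEN_AI_PIPELINE_NAME resolves to "gen_ai.pipeline.name" (the constant's value is not shown in this diff):

from contextvars import ContextVar

# Sketch only: "gen_ai.pipeline.name" is an assumed value for
# SPANDATA.GEN_AI_PIPELINE_NAME; the diff does not show the constant itself.
_ai_pipeline_name = ContextVar("gen_ai.pipeline.name", default=None)

def set_ai_pipeline_name(name):
    # Record the active pipeline name for spans created later in this context.
    _ai_pipeline_name.set(name)

set_ai_pipeline_name("summarize-docs")
assert _ai_pipeline_name.get() == "summarize-docs"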

sentry_sdk/consts.py

Lines changed: 16 additions & 0 deletions
@@ -474,6 +474,11 @@ class SPANDATA:
     Example: "COMPLETE"
     """

+    GEN_AI_RESPONSE_FORMAT = "gen_ai.response.format"
+    """
+    For an AI model call, the format of the response
+    """
+
     GEN_AI_RESPONSE_ID = "gen_ai.response.id"
     """
     Unique identifier for the completion.
@@ -515,6 +520,11 @@ class SPANDATA:
     The frequency penalty parameter used to reduce repetitiveness of generated tokens.
     Example: 0.1
     """
+    GEN_AI_REQUEST_LOGIT_BIAS = "gen_ai.logit_bias"
+    """
+    The logit bias parameter used to control the model's response.
+    Example: {"12345": -100}
+    """

     GEN_AI_REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens"
     """
@@ -546,6 +556,12 @@ class SPANDATA:
     Example: "1234567890"
     """

+    GEN_AI_REQUEST_TAGS = "gen_ai.request.tags"
+    """
+    The tags passed to the model.
+    Example: {"tag1": "value1", "tag2": "value2"}
+    """
+
     GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature"
     """
     The temperature parameter used to control randomness in the output.
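To see how the three new constants are meant to be consumed, here is a small hypothetical sketch (not part of this commit) that writes them onto a manually started span; span.set_data is the standard SDK API, and the op/name values are illustrative only:

import sentry_sdk
from sentry_sdk.consts import SPANDATA

sentry_sdk.init(dsn="", traces_sample_rate=1.0)  # empty DSN: nothing is sent

with sentry_sdk.start_span(op="gen_ai.chat", name="chat gpt-4") as span:
    # The three attributes added in this commit:
    span.set_data(SPANDATA.GEN_AI_RESPONSE_FORMAT, "json")
    span.set_data(SPANDATA.GEN_AI_REQUEST_LOGIT_BIAS, {"12345": -100})
    span.set_data(SPANDATA.GEN_AI_REQUEST_TAGS, {"tag1": "value1", "tag2": "value2"})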

sentry_sdk/integrations/langchain.py

Lines changed: 30 additions & 19 deletions
@@ -32,17 +32,18 @@

 DATA_FIELDS = {
-    "temperature": SPANDATA.AI_TEMPERATURE,
-    "top_p": SPANDATA.AI_TOP_P,
-    "top_k": SPANDATA.AI_TOP_K,
-    "function_call": SPANDATA.AI_FUNCTION_CALL,
-    "tool_calls": SPANDATA.AI_TOOL_CALLS,
-    "tools": SPANDATA.AI_TOOLS,
-    "response_format": SPANDATA.AI_RESPONSE_FORMAT,
-    "logit_bias": SPANDATA.AI_LOGIT_BIAS,
-    "tags": SPANDATA.AI_TAGS,
+    "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
+    "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
+    "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K,
+    "function_call": SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
+    "tool_calls": SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
+    "tools": SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
+    "response_format": SPANDATA.GEN_AI_RESPONSE_FORMAT,
+    "logit_bias": SPANDATA.GEN_AI_REQUEST_LOGIT_BIAS,
+    "tags": SPANDATA.GEN_AI_REQUEST_TAGS,
 }

+# TODO(shellmayr): is this still the case?
 # To avoid double collecting tokens, we do *not* measure
 # token counts for models for which we have an explicit integration
 NO_COLLECT_TOKEN_MODELS = [
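The DATA_FIELDS table is what translates LangChain invocation parameters into the new gen_ai.* attribute keys; the loop that consumes it appears in on_llm_start below. A sketch of the effect, with all_params standing in for the model's invocation kwargs and span/set_data_normalized as in the surrounding integration code:

# Illustrative only; mirrors the `for k, v in DATA_FIELDS.items()` loop below.
all_params = {"temperature": 0.2, "top_p": 0.9, "tags": ["search-agent"]}
for param, attribute in DATA_FIELDS.items():
    if param in all_params:
        # e.g. "temperature" is stored as "gen_ai.request.temperature",
        # "tags" as "gen_ai.request.tags", and so on.
        set_data_normalized(span, attribute, all_params[param])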
@@ -191,7 +192,7 @@ def on_llm_start(
         )
         span = watched_span.span
         if should_send_default_pii() and self.include_prompts:
-            set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, prompts)
+            set_data_normalized(span, SPANDATA.GEN_AI_REQUEST_MESSAGES, prompts)
         for k, v in DATA_FIELDS.items():
             if k in all_params:
                 set_data_normalized(span, v, all_params[k])
@@ -222,11 +223,11 @@ def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs):
         if not model and "anthropic" in all_params.get("_type"):
             model = "claude-2"
         if model:
-            span.set_data(SPANDATA.AI_MODEL_ID, model)
+            span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model)
         if should_send_default_pii() and self.include_prompts:
             set_data_normalized(
                 span,
-                SPANDATA.AI_INPUT_MESSAGES,
+                SPANDATA.GEN_AI_REQUEST_MESSAGES,
                 [
                     [self._normalize_langchain_message(x) for x in list_]
                     for list_ in messages
@@ -271,7 +272,7 @@ def on_llm_end(self, response, *, run_id, **kwargs):
         if should_send_default_pii() and self.include_prompts:
             set_data_normalized(
                 span_data.span,
-                SPANDATA.AI_RESPONSES,
+                SPANDATA.GEN_AI_RESPONSE_TEXT,
                 [[x.text for x in list_] for list_ in response.generations],
             )

@@ -317,7 +318,9 @@ def on_chain_start(self, serialized, inputs, *, run_id, **kwargs):
         )
         metadata = kwargs.get("metadata")
         if metadata:
-            set_data_normalized(watched_span.span, SPANDATA.AI_METADATA, metadata)
+            set_data_normalized(
+                watched_span.span, SPANDATA.GEN_AI_REQUEST_METADATA, metadata
+            )

     def on_chain_end(self, outputs, *, run_id, **kwargs):
         # type: (SentryLangchainCallback, Dict[str, Any], UUID, Any) -> Any
@@ -350,7 +353,9 @@ def on_agent_action(self, action, *, run_id, **kwargs):
         )
         if action.tool_input and should_send_default_pii() and self.include_prompts:
             set_data_normalized(
-                watched_span.span, SPANDATA.AI_INPUT_MESSAGES, action.tool_input
+                watched_span.span,
+                SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                action.tool_input,
             )

     def on_agent_finish(self, finish, *, run_id, **kwargs):
@@ -364,7 +369,9 @@ def on_agent_finish(self, finish, *, run_id, **kwargs):
             return
         if should_send_default_pii() and self.include_prompts:
             set_data_normalized(
-                span_data.span, SPANDATA.AI_RESPONSES, finish.return_values.items()
+                span_data.span,
+                SPANDATA.GEN_AI_RESPONSE_TEXT,
+                finish.return_values.items(),
             )
         self._exit_span(span_data, run_id)

@@ -384,12 +391,14 @@ def on_tool_start(self, serialized, input_str, *, run_id, **kwargs):
         if should_send_default_pii() and self.include_prompts:
             set_data_normalized(
                 watched_span.span,
-                SPANDATA.AI_INPUT_MESSAGES,
+                SPANDATA.GEN_AI_REQUEST_MESSAGES,
                 kwargs.get("inputs", [input_str]),
             )
         if kwargs.get("metadata"):
             set_data_normalized(
-                watched_span.span, SPANDATA.AI_METADATA, kwargs.get("metadata")
+                watched_span.span,
+                SPANDATA.GEN_AI_REQUEST_METADATA,
+                kwargs.get("metadata"),
             )

     def on_tool_end(self, output, *, run_id, **kwargs):
@@ -403,7 +412,9 @@ def on_tool_end(self, output, *, run_id, **kwargs):
         if not span_data:
             return
         if should_send_default_pii() and self.include_prompts:
-            set_data_normalized(span_data.span, SPANDATA.AI_RESPONSES, output)
+            set_data_normalized(
+                span_data.span, SPANDATA.GEN_AI_RESPONSE_TEXT, output
+            )
         self._exit_span(span_data, run_id)

     def on_tool_error(self, error, *args, run_id, **kwargs):
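All of the prompt and response writes above stay behind the same two-part gate as before the rename: should_send_default_pii() and the integration's include_prompts flag. A sketch of the configuration that enables capture, using the integration's documented options with a placeholder DSN:

import sentry_sdk
from sentry_sdk.integrations.langchain import LangchainIntegration

sentry_sdk.init(
    dsn="https://public@o0.ingest.sentry.io/0",  # placeholder DSN
    traces_sample_rate=1.0,
    send_default_pii=True,  # makes should_send_default_pii() return True
    integrations=[LangchainIntegration(include_prompts=True)],
)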

tests/integrations/langchain/test_langchain.py

Lines changed: 14 additions & 12 deletions
@@ -196,22 +196,24 @@ def test_langchain_agent(

     if send_default_pii and include_prompts:
         assert (
-            "You are very powerful" in chat_spans[0]["data"][SPANDATA.AI_INPUT_MESSAGES]
+            "You are very powerful"
+            in chat_spans[0]["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]
         )
-        assert "5" in chat_spans[0]["data"][SPANDATA.AI_RESPONSES]
-        assert "word" in tool_exec_span["data"][SPANDATA.AI_INPUT_MESSAGES]
-        assert 5 == int(tool_exec_span["data"][SPANDATA.AI_RESPONSES])
+        assert "5" in chat_spans[0]["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]
+        assert "word" in tool_exec_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]
+        assert 5 == int(tool_exec_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT])
         assert (
-            "You are very powerful" in chat_spans[1]["data"][SPANDATA.AI_INPUT_MESSAGES]
+            "You are very powerful"
+            in chat_spans[1]["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]
         )
-        assert "5" in chat_spans[1]["data"][SPANDATA.AI_RESPONSES]
+        assert "5" in chat_spans[1]["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]
     else:
-        assert SPANDATA.AI_INPUT_MESSAGES not in chat_spans[0].get("data", {})
-        assert SPANDATA.AI_RESPONSES not in chat_spans[0].get("data", {})
-        assert SPANDATA.AI_INPUT_MESSAGES not in chat_spans[1].get("data", {})
-        assert SPANDATA.AI_RESPONSES not in chat_spans[1].get("data", {})
-        assert SPANDATA.AI_INPUT_MESSAGES not in tool_exec_span.get("data", {})
-        assert SPANDATA.AI_RESPONSES not in tool_exec_span.get("data", {})
+        assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get("data", {})
+        assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("data", {})
+        assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[1].get("data", {})
+        assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[1].get("data", {})
+        assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in tool_exec_span.get("data", {})
+        assert SPANDATA.GEN_AI_RESPONSE_TEXT not in tool_exec_span.get("data", {})


 def test_langchain_error(sentry_init, capture_events):
