Skip to content

Commit 485ae4d

Browse files
committed
More data on chat span
1 parent ae5b3f2 commit 485ae4d

File tree

1 file changed

+64
-26
lines changed

sentry_sdk/integrations/langchain.py

Lines changed: 64 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ def _handle_error(self, run_id, error):
102102
if not run_id or run_id not in self.span_map:
103103
return
104104

105-
span_data = self.span_map[run_id]
105+
span_data = self.span_map.get(run_id)
106106
if not span_data:
107107
return
108108
sentry_sdk.capture_exception(error, span_data.span.scope)
@@ -328,9 +328,6 @@ def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs):
328328
def on_chat_model_end(self, response, *, run_id, **kwargs):
329329
# type: (SentryLangchainCallback, LLMResult, UUID, Any) -> Any
330330
"""Run when Chat Model ends running."""
331-
import ipdb
332-
333-
ipdb.set_trace()
334331
with capture_internal_exceptions():
335332
if not run_id:
336333
return
@@ -353,7 +350,7 @@ def on_chat_model_end(self, response, *, run_id, **kwargs):
353350
elif hasattr(response, "usage_metadata"):
354351
token_usage = response.usage_metadata
355352

356-
span_data = self.span_map[run_id]
353+
span_data = self.span_map.get(run_id)
357354
if not span_data:
358355
return
359356

@@ -401,6 +398,55 @@ def on_llm_end(self, response, *, run_id, **kwargs):
401398
if not run_id:
402399
return
403400

401+
span_data = self.span_map.get(run_id)
402+
if not span_data:
403+
return
404+
405+
span = span_data.span
406+
407+
try:
408+
generation_result = response.generations[0][0]
409+
except IndexError:
410+
generation_result = None
411+
412+
if generation_result is not None:
413+
try:
414+
response_model = generation_result.generation_info.get("model_name")
415+
if response_model is not None:
416+
span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response_model)
417+
except AttributeError:
418+
pass
419+
420+
try:
421+
finish_reason = generation_result.generation_info.get(
422+
"finish_reason"
423+
)
424+
if finish_reason is not None:
425+
span.set_data(
426+
SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, finish_reason
427+
)
428+
except AttributeError:
429+
pass
430+
431+
try:
432+
tool_calls = generation_result.message.get("tool_calls")
433+
if tool_calls is not None:
434+
set_data_normalized(
435+
span,
436+
SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
437+
tool_calls,
438+
unpack=False,
439+
)
440+
except AttributeError:
441+
pass
442+
443+
if should_send_default_pii() and self.include_prompts:
444+
set_data_normalized(
445+
span_data.span,
446+
SPANDATA.GEN_AI_RESPONSE_TEXT,
447+
[[x.text for x in list_] for list_ in response.generations],
448+
)
449+
404450
token_usage = None
405451
if response.llm_output and "token_usage" in response.llm_output:
406452
token_usage = response.llm_output["token_usage"]
@@ -414,17 +460,6 @@ def on_llm_end(self, response, *, run_id, **kwargs):
414460
elif hasattr(response, "token_usage"):
415461
token_usage = response.token_usage
416462

417-
span_data = self.span_map[run_id]
418-
if not span_data:
419-
return
420-
421-
if should_send_default_pii() and self.include_prompts:
422-
set_data_normalized(
423-
span_data.span,
424-
SPANDATA.GEN_AI_RESPONSE_TEXT,
425-
[[x.text for x in list_] for list_ in response.generations],
426-
)
427-
428463
if token_usage:
429464
input_tokens, output_tokens, total_tokens = self._extract_token_usage(
430465
token_usage
@@ -440,7 +475,7 @@ def on_llm_end(self, response, *, run_id, **kwargs):
440475
or total_tokens is not None
441476
):
442477
record_token_usage(
443-
span_data.span,
478+
span,
444479
input_tokens=input_tokens,
445480
output_tokens=output_tokens,
446481
total_tokens=total_tokens,
@@ -457,9 +492,6 @@ def on_llm_error(self, error, *, run_id, **kwargs):
457492
def on_chat_model_error(self, error, *, run_id, **kwargs):
458493
# type: (SentryLangchainCallback, Union[Exception, KeyboardInterrupt], UUID, Any) -> Any
459494
"""Run when Chat Model errors."""
460-
import ipdb
461-
462-
ipdb.set_trace()
463495
with capture_internal_exceptions():
464496
self._handle_error(run_id, error)
465497

@@ -475,7 +507,7 @@ def on_chain_end(self, outputs, *, run_id, **kwargs):
475507
if not run_id or run_id not in self.span_map:
476508
return
477509

478-
span_data = self.span_map[run_id]
510+
span_data = self.span_map.get(run_id)
479511
if not span_data:
480512
return
481513
self._exit_span(span_data, run_id)
@@ -487,15 +519,22 @@ def on_chain_error(self, error, *, run_id, **kwargs):
487519

488520
def on_agent_action(self, action, *, run_id, **kwargs):
489521
# type: (SentryLangchainCallback, AgentAction, UUID, Any) -> Any
490-
pass
522+
with capture_internal_exceptions():
523+
if not run_id or run_id not in self.span_map:
524+
return
525+
526+
span_data = self.span_map.get(run_id)
527+
if not span_data:
528+
return
529+
self._exit_span(span_data, run_id)
491530

492531
def on_agent_finish(self, finish, *, run_id, **kwargs):
493532
# type: (SentryLangchainCallback, AgentFinish, UUID, Any) -> Any
494533
with capture_internal_exceptions():
495534
if not run_id:
496535
return
497536

498-
span_data = self.span_map[run_id]
537+
span_data = self.span_map.get(run_id)
499538
if not span_data:
500539
return
501540
if should_send_default_pii() and self.include_prompts:
@@ -545,7 +584,7 @@ def on_tool_end(self, output, *, run_id, **kwargs):
545584
if not run_id or run_id not in self.span_map:
546585
return
547586

548-
span_data = self.span_map[run_id]
587+
span_data = self.span_map.get(run_id)
549588
if not span_data:
550589
return
551590
if should_send_default_pii() and self.include_prompts:
@@ -557,7 +596,7 @@ def on_tool_error(self, error, *args, run_id, **kwargs):
557596
"""Run when tool errors."""
558597
# TODO(shellmayr): how to correctly set the status when the tool fails?
559598
if run_id and run_id in self.span_map:
560-
span_data = self.span_map[run_id]
599+
span_data = self.span_map.get(run_id)
561600
if span_data:
562601
span_data.span.set_status("unknown")
563602

@@ -654,7 +693,6 @@ def _wrap_agent_executor_invoke(f):
654693
@wraps(f)
655694
def new_invoke(self, *args, **kwargs):
656695
# type: (Any, Any, Any) -> Any
657-
658696
integration = sentry_sdk.get_client().get_integration(LangchainIntegration)
659697
if integration is None:
660698
return f(self, *args, **kwargs)

0 commit comments (Comments: 0)