|
27 | 27 | Callbacks, |
28 | 28 | ) |
29 | 29 | from langchain_core.agents import AgentAction, AgentFinish |
| 30 | + from langchain.agents import AgentExecutor |
| 31 | + |
30 | 32 | except ImportError: |
31 | 33 | raise DidNotEnable("langchain not installed") |
32 | 34 |
|
@@ -71,6 +73,10 @@ def setup_once(): |
71 | 73 | # type: () -> None |
72 | 74 | manager._configure = _wrap_configure(manager._configure) |
73 | 75 |
|
| 76 | + if AgentExecutor is not None: |
| 77 | + AgentExecutor.invoke = _wrap_agent_executor_invoke(AgentExecutor.invoke) |
| 78 | + AgentExecutor.stream = _wrap_agent_executor_stream(AgentExecutor.stream) |
| 79 | + |
74 | 80 |
|
75 | 81 | class WatchedSpan: |
76 | 82 | span = None # type: Span |
@@ -265,7 +271,7 @@ def on_llm_start( |
265 | 271 |
|
266 | 272 | watched_span = self._create_span( |
267 | 273 | run_id, |
268 | | - kwargs.get("parent_run_id"), |
| 274 | + parent_id=parent_run_id, |
269 | 275 | op=OP.GEN_AI_PIPELINE, |
270 | 276 | name=kwargs.get("name") or "Langchain LLM call", |
271 | 277 | origin=LangchainIntegration.origin, |
@@ -293,7 +299,7 @@ def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs): |
293 | 299 | all_params.update(serialized.get("kwargs", {})) |
294 | 300 | watched_span = self._create_span( |
295 | 301 | run_id, |
296 | | - kwargs.get("parent_run_id"), |
| 302 | + parent_id=kwargs.get("parent_run_id"), |
297 | 303 | op=OP.GEN_AI_CHAT, |
298 | 304 | name=kwargs.get("name") or "Langchain Chat Model", |
299 | 305 | origin=LangchainIntegration.origin, |
@@ -483,22 +489,7 @@ def on_chain_error(self, error, *, run_id, **kwargs): |
483 | 489 |
|
    def on_agent_action(self, action, *, run_id, **kwargs):
        # type: (SentryLangchainCallback, AgentAction, UUID, Any) -> Any
        # Intentionally a no-op: per-action spans are no longer created here.
        # Agent invocations are traced at a higher level by wrapping
        # AgentExecutor.invoke / AgentExecutor.stream (see setup_once and
        # _wrap_agent_executor_*), which emit a single GEN_AI_INVOKE_AGENT
        # span that parents the callback-generated child spans.
        pass
502 | 493 |
|
503 | 494 | def on_agent_finish(self, finish, *, run_id, **kwargs): |
504 | 495 | # type: (SentryLangchainCallback, AgentFinish, UUID, Any) -> Any |
@@ -528,7 +519,7 @@ def on_tool_start(self, serialized, input_str, *, run_id, **kwargs): |
528 | 519 |
|
529 | 520 | watched_span = self._create_span( |
530 | 521 | run_id, |
531 | | - kwargs.get("parent_run_id"), |
| 522 | + parent_id=kwargs.get("parent_run_id"), |
532 | 523 | op=OP.GEN_AI_EXECUTE_TOOL, |
533 | 524 | name=f"execute_tool {tool_name}", |
534 | 525 | origin=LangchainIntegration.origin, |
@@ -657,3 +648,63 @@ def new_configure( |
657 | 648 | ) |
658 | 649 |
|
659 | 650 | return new_configure |
| 651 | + |
| 652 | + |
def _wrap_agent_executor_invoke(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]

    @wraps(f)
    def new_invoke(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        # Bail out untraced when the integration is not enabled.
        if sentry_sdk.get_client().get_integration(LangchainIntegration) is None:
            return f(self, *args, **kwargs)

        # Open a GEN_AI_INVOKE_AGENT span so every span the langchain
        # callbacks create during this call gets parented under it.
        with sentry_sdk.start_span(
            op=OP.GEN_AI_INVOKE_AGENT,
            name="AgentExecutor.invoke",
            origin=LangchainIntegration.origin,
        ) as span:
            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")

            # Best-effort model detection: executors usually expose the LLM
            # at self.agent.llm; record its model identifier when present.
            agent = getattr(self, "agent", None)
            llm = getattr(agent, "llm", None) if agent is not None else None
            if llm is not None:
                model_name = getattr(llm, "model_name", None) or getattr(
                    llm, "model", None
                )
                if model_name:
                    span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)

            return f(self, *args, **kwargs)

    return new_invoke
| 681 | + |
| 682 | + |
def _wrap_agent_executor_stream(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]

    @wraps(f)
    def new_stream(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(LangchainIntegration)
        if integration is None:
            return f(self, *args, **kwargs)

        # Create a span that will act as the parent for all callback-generated
        # spans.  AgentExecutor.stream returns a lazy iterator, so a plain
        # `with` block would close the span before the first chunk is even
        # produced; instead the span is entered manually and closed only once
        # the caller has exhausted the iterator (or iteration fails).
        span = sentry_sdk.start_span(
            op=OP.GEN_AI_INVOKE_AGENT,
            name="AgentExecutor.stream",
            origin=LangchainIntegration.origin,
        )
        span.__enter__()
        span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
        if hasattr(self, "agent") and hasattr(self.agent, "llm"):
            model_name = getattr(self.agent.llm, "model_name", None) or getattr(
                self.agent.llm, "model", None
            )
            if model_name:
                span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)

        try:
            result = f(self, *args, **kwargs)
        except Exception as e:
            # The wrapped call failed before producing an iterator; close the
            # span with the error so it is not leaked.
            span.__exit__(type(e), e, e.__traceback__)
            raise

        def new_iterator():
            # type: () -> Any
            # Re-yield every chunk, keeping the span open for the whole
            # consumption of the stream; `finally` guarantees the span is
            # closed on normal exhaustion, break/close, and errors alike.
            try:
                for event in result:
                    yield event
            finally:
                span.__exit__(None, None, None)

        return new_iterator()

    return new_stream
0 commit comments