From b1b500a1137e07cbe89d7b62392c2109e3b653aa Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Thu, 15 May 2025 20:32:12 +0530 Subject: [PATCH 01/13] Add SmoLAgents instrumentation --- agentops/instrumentation/__init__.py | 5 + agentops/instrumentation/common/attributes.py | 24 +++ agentops/instrumentation/smolagents/README.md | 88 +++++++++++ .../instrumentation/smolagents/__init__.py | 7 + .../smolagents/attributes/agent.py | 145 ++++++++++++++++++ .../smolagents/attributes/model.py | 137 +++++++++++++++++ .../smolagents/instrumentor.py | 132 ++++++++++++++++ .../smolagents/stream_wrapper.py | 125 +++++++++++++++ 8 files changed, 663 insertions(+) create mode 100644 agentops/instrumentation/smolagents/README.md create mode 100644 agentops/instrumentation/smolagents/__init__.py create mode 100644 agentops/instrumentation/smolagents/attributes/agent.py create mode 100644 agentops/instrumentation/smolagents/attributes/model.py create mode 100644 agentops/instrumentation/smolagents/instrumentor.py create mode 100644 agentops/instrumentation/smolagents/stream_wrapper.py diff --git a/agentops/instrumentation/__init__.py b/agentops/instrumentation/__init__.py index 79ce59981..9f85a10c3 100644 --- a/agentops/instrumentation/__init__.py +++ b/agentops/instrumentation/__init__.py @@ -87,6 +87,11 @@ def get_instance(self) -> BaseInstrumentor: class_name="AG2Instrumentor", provider_import_name="autogen", ), + InstrumentorLoader( + module_name="agentops.instrumentation.smolagents", + class_name="SmoLAgentsInstrumentor", + provider_import_name="smolagents", + ), ] diff --git a/agentops/instrumentation/common/attributes.py b/agentops/instrumentation/common/attributes.py index f267d615e..809121923 100644 --- a/agentops/instrumentation/common/attributes.py +++ b/agentops/instrumentation/common/attributes.py @@ -252,3 +252,27 @@ def get_base_span_attributes(span: Any) -> AttributeMap: attributes[CoreAttributes.PARENT_ID] = parent_id return attributes + + +def 
extract_token_usage(response: Any) -> Dict[str, int]: + """Extract token usage information from a response. + + Args: + response: The response object to extract token usage from + + Returns: + Dictionary containing token usage information + """ + usage = {} + + # Try to extract token counts from response + if hasattr(response, "usage"): + usage_data = response.usage + if hasattr(usage_data, "prompt_tokens"): + usage["prompt_tokens"] = usage_data.prompt_tokens + if hasattr(usage_data, "completion_tokens"): + usage["completion_tokens"] = usage_data.completion_tokens + if hasattr(usage_data, "total_tokens"): + usage["total_tokens"] = usage_data.total_tokens + + return usage diff --git a/agentops/instrumentation/smolagents/README.md b/agentops/instrumentation/smolagents/README.md new file mode 100644 index 000000000..20d63ff37 --- /dev/null +++ b/agentops/instrumentation/smolagents/README.md @@ -0,0 +1,88 @@ +# SmoLAgents Instrumentation + +This module provides OpenTelemetry instrumentation for the SmoLAgents framework. It captures telemetry data from model operations, agent executions, and tool usage. + +## Features + +- Model operation tracking + - Text generation + - Token usage + - Streaming responses + - Latency metrics + +- Agent execution monitoring + - Step-by-step execution + - Planning phases + - Tool usage + - Execution time + +- Tool usage analytics + - Tool call patterns + - Success/failure rates + - Execution time + - Error tracking + +## Usage + +```python +from agentops import init +from agentops.instrumentation.smolagents import SmoLAgentsInstrumentor + +# Initialize AgentOps with your API key +init(api_key="your-api-key") + +# The instrumentation will be automatically activated +# All SmoLAgents operations will now be tracked +``` + +## Metrics Collected + +1. Token Usage + - Input tokens + - Output tokens + - Total tokens per operation + +2. 
Timing Metrics + - Operation duration + - Time to first token (streaming) + - Tool execution time + - Planning phase duration + +3. Agent Metrics + - Step counts + - Planning steps + - Tools used + - Success/failure rates + +4. Error Tracking + - Generation errors + - Tool execution errors + - Parsing errors + +## Architecture + +The instrumentation is built on OpenTelemetry and follows the same pattern as other AgentOps instrumentors: + +1. Attribute Extractors + - Model attributes + - Agent attributes + - Tool call attributes + +2. Wrappers + - Method wrappers for sync operations + - Stream wrappers for async operations + - Context propagation handling + +3. Metrics + - Histograms for distributions + - Counters for events + - Custom attributes for filtering + +## Contributing + +When adding new features or modifying existing ones: + +1. Follow the established pattern for attribute extraction +2. Maintain context propagation +3. Add appropriate error handling +4. Update tests and documentation \ No newline at end of file diff --git a/agentops/instrumentation/smolagents/__init__.py b/agentops/instrumentation/smolagents/__init__.py new file mode 100644 index 000000000..7fe3338e9 --- /dev/null +++ b/agentops/instrumentation/smolagents/__init__.py @@ -0,0 +1,7 @@ +"""SmoLAgents instrumentation for AgentOps.""" +from .instrumentor import SmoLAgentsInstrumentor + +LIBRARY_NAME = "smolagents" +LIBRARY_VERSION = "1.16.0" + +__all__ = ["SmoLAgentsInstrumentor"] diff --git a/agentops/instrumentation/smolagents/attributes/agent.py b/agentops/instrumentation/smolagents/attributes/agent.py new file mode 100644 index 000000000..445f4ffee --- /dev/null +++ b/agentops/instrumentation/smolagents/attributes/agent.py @@ -0,0 +1,145 @@ +"""Attribute extractors for SmoLAgents agent operations.""" + +from typing import Any, Dict, Optional, Tuple +import uuid +import time + +from agentops.instrumentation.common.attributes import get_common_attributes +from agentops.semconv.agent import 
AgentAttributes +from agentops.semconv.tool import ToolAttributes + + +def get_agent_attributes( + args: Optional[Tuple] = None, + kwargs: Optional[Dict] = None, + return_value: Optional[Any] = None, +) -> Dict[str, Any]: + """Extract attributes from an agent execution. + + Args: + args: Optional tuple of positional arguments + kwargs: Optional dict of keyword arguments + return_value: Optional return value from the wrapped function + + Returns: + Dict containing extracted attributes + """ + attributes = get_common_attributes() + + # Extract agent info from instance + if args and len(args) > 0: + instance = args[0] + agent_type = instance.__class__.__name__ + tools = [t.name for t in instance.tools] if hasattr(instance, "tools") else [] + attributes.update( + { + AgentAttributes.AGENT_ID: str(uuid.uuid4()), + AgentAttributes.AGENT_NAME: agent_type, + AgentAttributes.AGENT_ROLE: "executor", + AgentAttributes.AGENT_TOOLS: tools, + } + ) + + # Extract task from kwargs or args + task = kwargs.get("task", args[1] if len(args) > 1 else "unknown") if kwargs else "unknown" + attributes[AgentAttributes.AGENT_REASONING] = task + + return attributes + + +def get_tool_call_attributes( + args: Optional[Tuple] = None, + kwargs: Optional[Dict] = None, + return_value: Optional[Any] = None, +) -> Dict[str, Any]: + """Extract attributes from a tool call. 
+ + Args: + args: Optional tuple of positional arguments + kwargs: Optional dict of keyword arguments + return_value: Optional return value from the wrapped function + + Returns: + Dict containing extracted attributes + """ + attributes = get_common_attributes() + + # Extract tool info from instance and args + if args and len(args) > 0: + instance = args[0] + tool_name = instance.name if hasattr(instance, "name") else "unknown" + tool_description = instance.description if hasattr(instance, "description") else "unknown" + + # Get arguments from args/kwargs + arguments = {} + if len(args) > 1: + arguments = args[1] + elif kwargs: + arguments = kwargs + + # Track execution time and success + start_time = time.time() + error = None + + try: + if return_value is not None: + execution_time = time.time() - start_time + attributes.update( + { + ToolAttributes.TOOL_ID: str(uuid.uuid4()), + ToolAttributes.TOOL_NAME: tool_name, + ToolAttributes.TOOL_DESCRIPTION: tool_description, + ToolAttributes.TOOL_PARAMETERS: arguments, + ToolAttributes.TOOL_STATUS: "success", + ToolAttributes.TOOL_RESULT: str(return_value), + "tool.execution_time": execution_time, + } + ) + except Exception as e: + error = str(e) + attributes.update( + { + ToolAttributes.TOOL_ID: str(uuid.uuid4()), + ToolAttributes.TOOL_NAME: tool_name, + ToolAttributes.TOOL_DESCRIPTION: tool_description, + ToolAttributes.TOOL_PARAMETERS: arguments, + ToolAttributes.TOOL_STATUS: "error", + ToolAttributes.TOOL_ERROR: error, + } + ) + + return attributes + + +def get_planning_step_attributes( + args: Optional[Tuple] = None, + kwargs: Optional[Dict] = None, + return_value: Optional[Any] = None, +) -> Dict[str, Any]: + """Extract attributes from a planning step. 
+ + Args: + args: Optional tuple of positional arguments + kwargs: Optional dict of keyword arguments + return_value: Optional return value from the wrapped function + + Returns: + Dict containing extracted attributes + """ + attributes = get_common_attributes() + + # Extract step info from kwargs + if kwargs: + step_number = kwargs.get("step_number", 0) + is_first_step = kwargs.get("is_first_step", False) + task = kwargs.get("task", "unknown") + + attributes.update( + { + AgentAttributes.AGENT_REASONING: task, + "planning.step_number": step_number, + "planning.is_first_step": is_first_step, + } + ) + + return attributes diff --git a/agentops/instrumentation/smolagents/attributes/model.py b/agentops/instrumentation/smolagents/attributes/model.py new file mode 100644 index 000000000..ba6eaf100 --- /dev/null +++ b/agentops/instrumentation/smolagents/attributes/model.py @@ -0,0 +1,137 @@ +"""Attribute extractors for SmoLAgents model operations.""" + +from typing import Any, Dict, Optional, Tuple + +from agentops.instrumentation.common.attributes import ( + get_common_attributes, + _extract_attributes_from_mapping, +) +from agentops.semconv.message import MessageAttributes + + +def get_model_attributes( + args: Optional[Tuple] = None, + kwargs: Optional[Dict] = None, + return_value: Optional[Any] = None, +) -> Dict[str, Any]: + """Extract attributes from a model generation call. 
+ + Args: + args: Optional tuple of positional arguments + kwargs: Optional dict of keyword arguments + return_value: Optional return value from the wrapped function + + Returns: + Dict containing extracted attributes + """ + attributes = get_common_attributes() + + # Extract model info from instance + if args and len(args) > 1: + instance = args[1] + model_id = instance.model_id if hasattr(instance, "model_id") else "unknown" + attributes.update( + { + "gen_ai.model.id": model_id, + "gen_ai.model.name": model_id.split("/")[-1] if "/" in model_id else model_id, + } + ) + + # Extract messages from kwargs + if kwargs: + messages = kwargs.get("messages", []) + if messages: + for i, msg in enumerate(messages): + msg_attrs = { + "role": msg.get("role", "unknown"), + "content": msg.get("content", ""), + } + attributes.update( + _extract_attributes_from_mapping( + msg_attrs, + { + MessageAttributes.PROMPT_ROLE.format(i=i): "role", + MessageAttributes.PROMPT_CONTENT.format(i=i): "content", + }, + ) + ) + + # Add response info if available + if return_value: + resp_attrs = { + "content": return_value.get("content", "") if isinstance(return_value, dict) else str(return_value), + } + attributes.update( + _extract_attributes_from_mapping( + resp_attrs, + { + MessageAttributes.COMPLETION_CONTENT.format(i=0): "content", + }, + ) + ) + + return attributes + + +def get_stream_attributes( + args: Optional[Tuple] = None, + kwargs: Optional[Dict] = None, + return_value: Optional[Any] = None, +) -> Dict[str, Any]: + """Extract attributes from a streaming model response. 
+ + Args: + args: Optional tuple of positional arguments + kwargs: Optional dict of keyword arguments + return_value: Optional return value from the wrapped function + + Returns: + Dict containing extracted attributes + """ + attributes = get_common_attributes() + + # Extract model info from instance + if args and len(args) > 1: + instance = args[1] + model_id = instance.model_id if hasattr(instance, "model_id") else "unknown" + attributes.update( + { + "gen_ai.model.id": model_id, + "gen_ai.model.name": model_id.split("/")[-1] if "/" in model_id else model_id, + } + ) + + # Extract messages from kwargs + if kwargs: + messages = kwargs.get("messages", []) + if messages: + for i, msg in enumerate(messages): + msg_attrs = { + "role": msg.get("role", "unknown"), + "content": msg.get("content", ""), + } + attributes.update( + _extract_attributes_from_mapping( + msg_attrs, + { + MessageAttributes.PROMPT_ROLE.format(i=i): "role", + MessageAttributes.PROMPT_CONTENT.format(i=i): "content", + }, + ) + ) + + # Add chunk info if available + if return_value: + chunk_attrs = { + "content": return_value.get("content", "") if isinstance(return_value, dict) else str(return_value), + } + attributes.update( + _extract_attributes_from_mapping( + chunk_attrs, + { + MessageAttributes.COMPLETION_CONTENT.format(i=0): "content", + }, + ) + ) + + return attributes diff --git a/agentops/instrumentation/smolagents/instrumentor.py b/agentops/instrumentation/smolagents/instrumentor.py new file mode 100644 index 000000000..eeedd3e0c --- /dev/null +++ b/agentops/instrumentation/smolagents/instrumentor.py @@ -0,0 +1,132 @@ +"""SmoLAgents instrumentation for AgentOps.""" + +from typing import Collection +from opentelemetry.instrumentation.instrumentor import BaseInstrumentor +from opentelemetry.trace import get_tracer, SpanKind + +from agentops.instrumentation.common.wrappers import WrapConfig, wrap, unwrap +from agentops.instrumentation.smolagents import LIBRARY_VERSION +from 
agentops.instrumentation.smolagents.attributes.agent import ( + get_agent_attributes, + get_tool_call_attributes, + get_planning_step_attributes, +) + + +class SmoLAgentsInstrumentor(BaseInstrumentor): + """An instrumentor for SmoLAgents.""" + + def instrumentation_dependencies(self) -> Collection[str]: + """Get instrumentation dependencies. + + Returns: + Collection of package names requiring instrumentation + """ + return [] + + def _instrument(self, **kwargs): + """Instrument SmoLAgents. + + Args: + **kwargs: Instrumentation options + """ + tracer = get_tracer( + __name__, + LIBRARY_VERSION, + schema_url="https://opentelemetry.io/schemas/1.11.0", + ) + + # Instrument ToolCallingAgent + wrap( + WrapConfig( + trace_name="tool_calling_agent.run", + package="smolagents.agents", + class_name="ToolCallingAgent", + method_name="run", + handler=get_agent_attributes, + span_kind=SpanKind.CLIENT, + ), + tracer=tracer, + ) + + # Instrument CodeAgent + wrap( + WrapConfig( + trace_name="code_agent.run", + package="smolagents.agents", + class_name="CodeAgent", + method_name="run", + handler=get_agent_attributes, + span_kind=SpanKind.CLIENT, + ), + tracer=tracer, + ) + + # Instrument tool execution + wrap( + WrapConfig( + trace_name="tool.execute", + package="smolagents.tools", + class_name="Tool", + method_name="__call__", + handler=get_tool_call_attributes, + span_kind=SpanKind.CLIENT, + ), + tracer=tracer, + ) + + # Instrument planning steps + wrap( + WrapConfig( + trace_name="tool_calling_agent.plan", + package="smolagents.agents", + class_name="ToolCallingAgent", + method_name="_generate_planning_step", + handler=get_planning_step_attributes, + span_kind=SpanKind.CLIENT, + ), + tracer=tracer, + ) + + def _uninstrument(self, **kwargs): + """Remove SmoLAgents instrumentation. 
+ + Args: + **kwargs: Uninstrumentation options + """ + unwrap( + WrapConfig( + trace_name="tool_calling_agent.run", + package="smolagents.agents", + class_name="ToolCallingAgent", + method_name="run", + handler=get_agent_attributes, + ) + ) + unwrap( + WrapConfig( + trace_name="code_agent.run", + package="smolagents.agents", + class_name="CodeAgent", + method_name="run", + handler=get_agent_attributes, + ) + ) + unwrap( + WrapConfig( + trace_name="tool.execute", + package="smolagents.tools", + class_name="Tool", + method_name="__call__", + handler=get_tool_call_attributes, + ) + ) + unwrap( + WrapConfig( + trace_name="tool_calling_agent.plan", + package="smolagents.agents", + class_name="ToolCallingAgent", + method_name="_generate_planning_step", + handler=get_planning_step_attributes, + ) + ) diff --git a/agentops/instrumentation/smolagents/stream_wrapper.py b/agentops/instrumentation/smolagents/stream_wrapper.py new file mode 100644 index 000000000..fa1768404 --- /dev/null +++ b/agentops/instrumentation/smolagents/stream_wrapper.py @@ -0,0 +1,125 @@ +"""Stream wrapper for SmoLAgents model streaming responses.""" + +import time +import uuid + +from opentelemetry.trace import Status, StatusCode + +from agentops.semconv.message import MessageAttributes +from agentops.semconv.agent import AgentAttributes +from agentops.semconv.tool import ToolAttributes +from .attributes.model import get_stream_attributes + + +def model_stream_wrapper(tracer): + """Wrapper for model streaming methods. 
+ + Args: + tracer: OpenTelemetry tracer + + Returns: + Wrapped function + """ + + def wrapper(wrapped, instance, args, kwargs): + messages = kwargs.get("messages", []) + model_id = instance.model_id if hasattr(instance, "model_id") else "unknown" + + with tracer.start_as_current_span( + name=f"{model_id}.generate_stream", attributes=get_stream_attributes(model_id=model_id, messages=messages) + ) as span: + try: + # Start streaming + stream = wrapped(*args, **kwargs) + first_token_received = False + start_time = time.time() + accumulated_text = "" + + # Process stream + for chunk in stream: + if not first_token_received: + first_token_received = True + span.set_attribute("gen_ai.time_to_first_token", time.time() - start_time) + + # Accumulate text and update attributes + if hasattr(chunk, "content") and chunk.content: + accumulated_text += chunk.content + span.set_attribute(MessageAttributes.COMPLETION_CONTENT.format(i=0), accumulated_text) + span.set_attribute(MessageAttributes.COMPLETION_TYPE.format(i=0), "text") + + yield chunk + + # Set final attributes + span.set_attribute("gen_ai.streaming_duration", time.time() - start_time) + span.set_status(Status(StatusCode.OK)) + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + raise + + return wrapper + + +def agent_stream_wrapper(tracer): + """Wrapper for agent streaming methods. 
+ + Args: + tracer: OpenTelemetry tracer + + Returns: + Wrapped function + """ + + def wrapper(wrapped, instance, args, kwargs): + task = kwargs.get("task", args[0] if args else "unknown") + agent_type = instance.__class__.__name__ + agent_id = str(uuid.uuid4()) + + with tracer.start_as_current_span( + name=f"{agent_type}.run_stream", + attributes={ + AgentAttributes.AGENT_ID: agent_id, + AgentAttributes.AGENT_NAME: agent_type, + AgentAttributes.AGENT_ROLE: "executor", + AgentAttributes.AGENT_REASONING: task, + }, + ) as span: + try: + # Initialize counters + step_count = 0 + planning_steps = 0 + tools_used = set() + start_time = time.time() + + # Process stream + stream = wrapped(*args, **kwargs) + for step in stream: + step_count += 1 + + # Track step types + if hasattr(step, "type"): + if step.type == "planning": + planning_steps += 1 + elif step.type == "tool_call": + tools_used.add(step.tool_name) + # Add tool-specific attributes + span.set_attribute(ToolAttributes.TOOL_NAME, step.tool_name) + if hasattr(step, "arguments"): + span.set_attribute(ToolAttributes.TOOL_PARAMETERS, step.arguments) + + # Update span attributes + span.set_attribute("agent.step_count", step_count) + span.set_attribute("agent.planning_steps", planning_steps) + span.set_attribute(AgentAttributes.AGENT_TOOLS, list(tools_used)) + + yield step + + # Set final attributes + span.set_attribute("agent.execution_time", time.time() - start_time) + span.set_status(Status(StatusCode.OK)) + + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + raise + + return wrapper From d8dd43ad725d67821b4873f7e0c99a2dbaaa369b Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Fri, 30 May 2025 20:29:22 +0530 Subject: [PATCH 02/13] Remove deprecated instrumentor definitions from `__init__.py` to streamline the instrumentation module. This change enhances code clarity and prepares for future updates. 
--- agentops/instrumentation/__init__.py | 39 ---------------------------- 1 file changed, 39 deletions(-) diff --git a/agentops/instrumentation/__init__.py b/agentops/instrumentation/__init__.py index 85f7e786e..d4e271f3d 100644 --- a/agentops/instrumentation/__init__.py +++ b/agentops/instrumentation/__init__.py @@ -253,45 +253,6 @@ def get_instance(self) -> BaseInstrumentor: return getattr(self.module, self.class_name)() -available_instrumentors: list[InstrumentorLoader] = [ - InstrumentorLoader( - module_name="agentops.instrumentation.openai", - class_name="OpenAIInstrumentor", - provider_import_name="openai", - ), - InstrumentorLoader( - module_name="agentops.instrumentation.anthropic", - class_name="AnthropicInstrumentor", - provider_import_name="anthropic", - ), - InstrumentorLoader( - module_name="agentops.instrumentation.crewai", - class_name="CrewAIInstrumentor", - provider_import_name="crewai", - ), - InstrumentorLoader( - module_name="agentops.instrumentation.openai_agents", - class_name="OpenAIAgentsInstrumentor", - provider_import_name="agents", - ), - InstrumentorLoader( - module_name="agentops.instrumentation.google_generativeai", - class_name="GoogleGenerativeAIInstrumentor", - provider_import_name="google.genai", - ), - InstrumentorLoader( - module_name="agentops.instrumentation.ibm_watsonx_ai", - class_name="IBMWatsonXInstrumentor", - provider_import_name="ibm_watsonx_ai", - ), - InstrumentorLoader( - module_name="agentops.instrumentation.ag2", - class_name="AG2Instrumentor", - provider_import_name="autogen", - ), -] - - def instrument_one(loader: InstrumentorLoader) -> Optional[BaseInstrumentor]: """ Instrument a single package using the provided loader. 
From 184584ec059baf72af9a015bffc976218d19be00 Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Fri, 30 May 2025 21:09:50 +0530 Subject: [PATCH 03/13] Add SmoLAgents instrumentation --- agentops/instrumentation/__init__.py | 5 + agentops/instrumentation/common/wrappers.py | 38 +- .../instrumentation/smolagents/__init__.py | 3 +- .../smolagents/attributes/agent.py | 343 +++++++++++++++--- .../smolagents/attributes/model.py | 225 ++++++++---- .../smolagents/instrumentor.py | 281 ++++++++++---- .../smolagents/stream_wrapper.py | 137 ++++++- 7 files changed, 845 insertions(+), 187 deletions(-) diff --git a/agentops/instrumentation/__init__.py b/agentops/instrumentation/__init__.py index d4e271f3d..9c5bf1d34 100644 --- a/agentops/instrumentation/__init__.py +++ b/agentops/instrumentation/__init__.py @@ -208,6 +208,11 @@ class InstrumentorConfig(TypedDict): "class_name": "GoogleADKInstrumentor", "min_version": "0.1.0", }, + "smolagents": { + "module_name": "agentops.instrumentation.smolagents", + "class_name": "SmoLAgentsInstrumentor", + "min_version": "1.8.0", + }, } # Combine all target packages for monitoring diff --git a/agentops/instrumentation/common/wrappers.py b/agentops/instrumentation/common/wrappers.py index c1127b8f9..ac3c551b1 100644 --- a/agentops/instrumentation/common/wrappers.py +++ b/agentops/instrumentation/common/wrappers.py @@ -6,7 +6,7 @@ spans with attributes, and functions for creating and applying wrappers. 
""" -from typing import Any, Optional, Tuple, Dict, Callable +from typing import Any, Optional, Tuple, Dict, Callable, Union from dataclasses import dataclass import logging from wrapt import wrap_function_wrapper # type: ignore @@ -21,6 +21,7 @@ logger = logging.getLogger(__name__) AttributeHandler = Callable[[Optional[Tuple], Optional[Dict], Optional[Any]], AttributeMap] +DynamicTraceName = Callable[[Optional[Tuple], Optional[Dict], Optional[Any]], str] @dataclass @@ -32,7 +33,7 @@ class WrapConfig: to set, and how to name the resulting trace spans. Attributes: - trace_name: The name to use for the trace span + trace_name: The name to use for the trace span (can be string or callable) package: The package containing the target class class_name: The name of the class containing the method method_name: The name of the method to wrap @@ -43,7 +44,7 @@ class WrapConfig: span_kind: The kind of span to create (default: CLIENT) """ - trace_name: str + trace_name: Union[str, DynamicTraceName] package: str class_name: str method_name: str @@ -55,6 +56,33 @@ def __repr__(self): return f"{self.package}.{self.class_name}.{self.method_name}" +def _get_span_name( + trace_name: Union[str, DynamicTraceName], + args: Optional[Tuple], + kwargs: Optional[Dict], + instance: Optional[Any] = None, +) -> str: + """Get the span name, handling both static strings and dynamic functions. + + Args: + trace_name: The trace name (string or callable) + args: Optional tuple of positional arguments + kwargs: Optional dict of keyword arguments + instance: Optional instance object + + Returns: + The span name to use + """ + if callable(trace_name): + try: + return trace_name(args, kwargs, instance) + except Exception as e: + logger.warning(f"Failed to generate dynamic span name: {e}") + return "unknown_operation" + else: + return trace_name + + def _update_span(span: Span, attributes: AttributeMap) -> None: """Update a span with the provided attributes. 
@@ -114,7 +142,7 @@ async def awrapper(wrapped, instance, args, kwargs): return_value = None with tracer.start_as_current_span( - wrap_config.trace_name, + _get_span_name(wrap_config.trace_name, args, kwargs, instance), kind=wrap_config.span_kind, ) as span: try: @@ -146,7 +174,7 @@ def wrapper(wrapped, instance, args, kwargs): return_value = None with tracer.start_as_current_span( - wrap_config.trace_name, + _get_span_name(wrap_config.trace_name, args, kwargs, instance), kind=wrap_config.span_kind, ) as span: try: diff --git a/agentops/instrumentation/smolagents/__init__.py b/agentops/instrumentation/smolagents/__init__.py index 7fe3338e9..46e6831dc 100644 --- a/agentops/instrumentation/smolagents/__init__.py +++ b/agentops/instrumentation/smolagents/__init__.py @@ -1,7 +1,8 @@ """SmoLAgents instrumentation for AgentOps.""" -from .instrumentor import SmoLAgentsInstrumentor LIBRARY_NAME = "smolagents" LIBRARY_VERSION = "1.16.0" +from agentops.instrumentation.smolagents.instrumentor import SmoLAgentsInstrumentor # noqa: E402 + __all__ = ["SmoLAgentsInstrumentor"] diff --git a/agentops/instrumentation/smolagents/attributes/agent.py b/agentops/instrumentation/smolagents/attributes/agent.py index 445f4ffee..71e5e7c15 100644 --- a/agentops/instrumentation/smolagents/attributes/agent.py +++ b/agentops/instrumentation/smolagents/attributes/agent.py @@ -2,11 +2,13 @@ from typing import Any, Dict, Optional, Tuple import uuid -import time +import json from agentops.instrumentation.common.attributes import get_common_attributes from agentops.semconv.agent import AgentAttributes from agentops.semconv.tool import ToolAttributes +from agentops.semconv.message import MessageAttributes +from agentops.semconv.span_attributes import SpanAttributes def get_agent_attributes( @@ -30,29 +32,128 @@ def get_agent_attributes( if args and len(args) > 0: instance = args[0] agent_type = instance.__class__.__name__ - tools = [t.name for t in instance.tools] if hasattr(instance, "tools") else 
[] + + # Get tools and managed agents + tools = [] + if hasattr(instance, "tools") and instance.tools: + tools = list(instance.tools.keys()) + managed_agents = [] + if hasattr(instance, "managed_agents") and instance.managed_agents: + managed_agents = list(instance.managed_agents.keys()) + + # Get model info + model_id = None + if hasattr(instance, "model") and hasattr(instance.model, "model_id"): + model_id = instance.model.model_id + attributes.update( { AgentAttributes.AGENT_ID: str(uuid.uuid4()), AgentAttributes.AGENT_NAME: agent_type, AgentAttributes.AGENT_ROLE: "executor", AgentAttributes.AGENT_TOOLS: tools, + "agent.managed_agents": managed_agents, } ) + # Only add attributes if they have non-None values + max_steps = getattr(instance, "max_steps", None) + if max_steps is not None: + attributes["agent.max_steps"] = max_steps + + planning_interval = getattr(instance, "planning_interval", None) + if planning_interval is not None: + attributes["agent.planning_interval"] = planning_interval + + if model_id: + attributes[SpanAttributes.LLM_REQUEST_MODEL] = model_id + # Extract task from kwargs or args - task = kwargs.get("task", args[1] if len(args) > 1 else "unknown") if kwargs else "unknown" - attributes[AgentAttributes.AGENT_REASONING] = task + if kwargs: + task = kwargs.get("task") + stream = kwargs.get("stream", False) + reset = kwargs.get("reset", True) + max_steps = kwargs.get("max_steps") + additional_args = kwargs.get("additional_args") + + if task: + attributes[AgentAttributes.AGENT_REASONING] = task + attributes["agent.stream_mode"] = stream + attributes["agent.reset"] = reset + if max_steps is not None: + attributes["agent.max_steps_override"] = max_steps + if additional_args: + attributes["agent.additional_args"] = json.dumps(additional_args) + elif args and len(args) > 1: + attributes[AgentAttributes.AGENT_REASONING] = args[1] + + # Handle return value for full result mode + if return_value is not None: + if hasattr(return_value, "output"): + # 
RunResult object + attributes[SpanAttributes.AGENTOPS_ENTITY_OUTPUT] = str(return_value.output) + attributes["agent.result_state"] = return_value.state + if return_value.token_usage: + attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = return_value.token_usage.input_tokens + attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = return_value.token_usage.output_tokens + attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = ( + return_value.token_usage.input_tokens + return_value.token_usage.output_tokens + ) + else: + attributes[SpanAttributes.AGENTOPS_ENTITY_OUTPUT] = str(return_value) return attributes -def get_tool_call_attributes( +def get_agent_stream_attributes( args: Optional[Tuple] = None, kwargs: Optional[Dict] = None, return_value: Optional[Any] = None, ) -> Dict[str, Any]: - """Extract attributes from a tool call. + """Extract attributes from agent streaming execution. + + Args: + args: Optional tuple of positional arguments + kwargs: Optional dict of keyword arguments + return_value: Optional return value from the wrapped function + + Returns: + Dict containing extracted attributes + """ + attributes = get_common_attributes() + + # Extract agent info from instance + if args and len(args) > 0: + instance = args[0] + agent_type = instance.__class__.__name__ + + attributes.update( + { + AgentAttributes.AGENT_NAME: agent_type, + AgentAttributes.AGENT_ROLE: "executor", + "agent.stream_mode": True, + } + ) + + # Extract task and parameters + if kwargs: + task = kwargs.get("task") + max_steps = kwargs.get("max_steps") + + if task: + attributes[AgentAttributes.AGENT_REASONING] = task + if max_steps: + attributes["agent.max_steps"] = max_steps + + return attributes + + +def get_agent_step_attributes( + args: Optional[Tuple] = None, + kwargs: Optional[Dict] = None, + return_value: Optional[Any] = None, +) -> Dict[str, Any]: + """Extract attributes from an agent step execution. 
Args: args: Optional tuple of positional arguments @@ -64,50 +165,135 @@ def get_tool_call_attributes( """ attributes = get_common_attributes() - # Extract tool info from instance and args + # Extract agent info from instance if args and len(args) > 0: instance = args[0] - tool_name = instance.name if hasattr(instance, "name") else "unknown" - tool_description = instance.description if hasattr(instance, "description") else "unknown" - - # Get arguments from args/kwargs - arguments = {} - if len(args) > 1: - arguments = args[1] - elif kwargs: - arguments = kwargs - - # Track execution time and success - start_time = time.time() - error = None - - try: - if return_value is not None: - execution_time = time.time() - start_time + agent_type = instance.__class__.__name__ + + attributes.update( + { + AgentAttributes.AGENT_NAME: agent_type, + "agent.step_number": getattr(instance, "step_number", 0), + } + ) + + # Extract memory step info + if args and len(args) > 1: + memory_step = args[1] + if hasattr(memory_step, "step_number"): + attributes["step.number"] = memory_step.step_number + if hasattr(memory_step, "tool_calls") and memory_step.tool_calls: + # Extract tool call info + for i, tool_call in enumerate(memory_step.tool_calls): attributes.update( { - ToolAttributes.TOOL_ID: str(uuid.uuid4()), - ToolAttributes.TOOL_NAME: tool_name, - ToolAttributes.TOOL_DESCRIPTION: tool_description, - ToolAttributes.TOOL_PARAMETERS: arguments, - ToolAttributes.TOOL_STATUS: "success", - ToolAttributes.TOOL_RESULT: str(return_value), - "tool.execution_time": execution_time, + MessageAttributes.TOOL_CALL_ID.format(i=i): tool_call.id, + MessageAttributes.TOOL_CALL_NAME.format(i=i): tool_call.name, + MessageAttributes.TOOL_CALL_ARGUMENTS.format(i=i): json.dumps(tool_call.arguments), } ) - except Exception as e: - error = str(e) + if hasattr(memory_step, "error") and memory_step.error: + attributes["step.error"] = str(memory_step.error) + if hasattr(memory_step, "observations"): + 
attributes["step.observations"] = str(memory_step.observations) + + # Handle return value + if return_value is not None: + attributes[SpanAttributes.AGENTOPS_ENTITY_OUTPUT] = str(return_value) + + return attributes + + +def get_tool_call_attributes( + args: Optional[Tuple] = None, + kwargs: Optional[Dict] = None, + return_value: Optional[Any] = None, +) -> Dict[str, Any]: + """Extract attributes from a tool call. + + Args: + args: Optional tuple of positional arguments + kwargs: Optional dict of keyword arguments + return_value: Optional return value from the wrapped function + + Returns: + Dict containing extracted attributes + """ + attributes = get_common_attributes() + + # Extract tool call information + tool_call = None + if kwargs and "tool_call" in kwargs: + tool_call = kwargs["tool_call"] + elif args and len(args) > 1: + tool_call = args[1] + + if tool_call: + # Extract tool call details + tool_id = str(uuid.uuid4()) + tool_name = "unknown" + tool_arguments = {} + + if hasattr(tool_call, "id"): + tool_id = tool_call.id + if hasattr(tool_call, "name"): + tool_name = tool_call.name + elif hasattr(tool_call, "function") and hasattr(tool_call.function, "name"): + tool_name = tool_call.function.name + + if hasattr(tool_call, "arguments"): + tool_arguments = tool_call.arguments + elif hasattr(tool_call, "function") and hasattr(tool_call.function, "arguments"): + try: + tool_arguments = ( + json.loads(tool_call.function.arguments) + if isinstance(tool_call.function.arguments, str) + else tool_call.function.arguments + ) + except (json.JSONDecodeError, TypeError): + tool_arguments = {"raw": str(tool_call.function.arguments)} + + attributes.update( + { + ToolAttributes.TOOL_ID: tool_id, + ToolAttributes.TOOL_NAME: tool_name, + ToolAttributes.TOOL_PARAMETERS: json.dumps(tool_arguments), + ToolAttributes.TOOL_STATUS: "pending", + ToolAttributes.TOOL_DESCRIPTION: "unknown", + } + ) + + # Extract instance information for Tool.__call__ style calls + if args and len(args) 
> 0: + instance = args[0] + if hasattr(instance, "__class__") and instance.__class__.__name__ in ["Tool", "DuckDuckGoSearchTool"]: + tool_name = getattr(instance, "name", instance.__class__.__name__) + tool_description = getattr(instance, "description", "unknown") + + # Update attributes with instance info attributes.update( { - ToolAttributes.TOOL_ID: str(uuid.uuid4()), ToolAttributes.TOOL_NAME: tool_name, ToolAttributes.TOOL_DESCRIPTION: tool_description, - ToolAttributes.TOOL_PARAMETERS: arguments, - ToolAttributes.TOOL_STATUS: "error", - ToolAttributes.TOOL_ERROR: error, } ) + # If there are additional args, they might be tool inputs + if len(args) > 1: + tool_inputs = {} + for i, arg in enumerate(args[1:], 1): + tool_inputs[f"arg_{i}"] = str(arg) + attributes[ToolAttributes.TOOL_PARAMETERS] = json.dumps(tool_inputs) + + # Handle return value + if return_value is not None: + attributes[ToolAttributes.TOOL_STATUS] = "success" + # Store the result if it's not too large + result_str = str(return_value) + if len(result_str) > 1000: + result_str = result_str[:1000] + "..." 
+ attributes[ToolAttributes.TOOL_RESULT] = result_str + return attributes @@ -128,18 +314,91 @@ def get_planning_step_attributes( """ attributes = get_common_attributes() - # Extract step info from kwargs + # Extract agent info from instance + if args and len(args) > 0: + instance = args[0] + agent_type = instance.__class__.__name__ + + attributes.update( + { + AgentAttributes.AGENT_NAME: agent_type, + "planning.agent_type": agent_type, + } + ) + + # Extract planning step info from args + if args and len(args) > 1: + task = args[1] + attributes[AgentAttributes.AGENT_REASONING] = task + + # Extract kwargs if kwargs: - step_number = kwargs.get("step_number", 0) is_first_step = kwargs.get("is_first_step", False) - task = kwargs.get("task", "unknown") + step = kwargs.get("step", 0) attributes.update( { - AgentAttributes.AGENT_REASONING: task, - "planning.step_number": step_number, "planning.is_first_step": is_first_step, + "planning.step_number": step, } ) + # Handle generator return value + if return_value is not None: + # The return value is typically a generator for planning steps + attributes["planning.status"] = "completed" + + return attributes + + +def get_managed_agent_attributes( + args: Optional[Tuple] = None, + kwargs: Optional[Dict] = None, + return_value: Optional[Any] = None, +) -> Dict[str, Any]: + """Extract attributes from a managed agent call. 
+ + Args: + args: Optional tuple of positional arguments + kwargs: Optional dict of keyword arguments + return_value: Optional return value from the wrapped function + + Returns: + Dict containing extracted attributes + """ + attributes = get_common_attributes() + + # Extract agent info from instance + if args and len(args) > 0: + instance = args[0] + agent_type = instance.__class__.__name__ + agent_name = getattr(instance, "name", agent_type) + agent_description = getattr(instance, "description", "") + + attributes.update( + { + AgentAttributes.AGENT_ID: str(uuid.uuid4()), + AgentAttributes.AGENT_NAME: agent_name, + AgentAttributes.AGENT_ROLE: "managed", + "agent.type": agent_type, + "agent.description": agent_description, + "agent.provide_run_summary": getattr(instance, "provide_run_summary", False), + } + ) + + # Extract task + if args and len(args) > 1: + task = args[1] + attributes[AgentAttributes.AGENT_REASONING] = task + elif kwargs and "task" in kwargs: + attributes[AgentAttributes.AGENT_REASONING] = kwargs["task"] + + # Handle return value + if return_value is not None: + if isinstance(return_value, dict): + # Managed agent typically returns a dict with task and output + attributes[SpanAttributes.AGENTOPS_ENTITY_OUTPUT] = json.dumps(return_value) + else: + attributes[SpanAttributes.AGENTOPS_ENTITY_OUTPUT] = str(return_value) + return attributes diff --git a/agentops/instrumentation/smolagents/attributes/model.py b/agentops/instrumentation/smolagents/attributes/model.py index ba6eaf100..0f02f9685 100644 --- a/agentops/instrumentation/smolagents/attributes/model.py +++ b/agentops/instrumentation/smolagents/attributes/model.py @@ -1,12 +1,13 @@ """Attribute extractors for SmoLAgents model operations.""" from typing import Any, Dict, Optional, Tuple +import json from agentops.instrumentation.common.attributes import ( get_common_attributes, - _extract_attributes_from_mapping, ) from agentops.semconv.message import MessageAttributes +from 
agentops.semconv.span_attributes import SpanAttributes def get_model_attributes( @@ -27,48 +28,131 @@ def get_model_attributes( attributes = get_common_attributes() # Extract model info from instance - if args and len(args) > 1: - instance = args[1] - model_id = instance.model_id if hasattr(instance, "model_id") else "unknown" + if args and len(args) > 0: + instance = args[0] + model_id = getattr(instance, "model_id", "unknown") + attributes.update( { - "gen_ai.model.id": model_id, - "gen_ai.model.name": model_id.split("/")[-1] if "/" in model_id else model_id, + SpanAttributes.LLM_REQUEST_MODEL: model_id, + SpanAttributes.LLM_SYSTEM: instance.__class__.__name__, } ) - # Extract messages from kwargs + # Extract model-specific attributes + if hasattr(instance, "temperature"): + attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] = instance.temperature + if hasattr(instance, "max_tokens"): + attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] = instance.max_tokens + if hasattr(instance, "api_base"): + attributes[SpanAttributes.LLM_OPENAI_API_BASE] = instance.api_base + + # Extract messages from args/kwargs + messages = None + if args and len(args) > 1: + messages = args[1] + elif kwargs and "messages" in kwargs: + messages = kwargs["messages"] + + if messages: + # Process prompt messages + for i, msg in enumerate(messages): + # Handle different message formats + if isinstance(msg, dict): + role = msg.get("role", "unknown") + content = msg.get("content", "") + + # Handle content that might be a list (for multimodal) + if isinstance(content, list): + text_content = "" + for item in content: + if isinstance(item, dict) and item.get("type") == "text": + text_content += item.get("text", "") + content = text_content + + attributes.update( + { + MessageAttributes.PROMPT_ROLE.format(i=i): role, + MessageAttributes.PROMPT_CONTENT.format(i=i): content, + MessageAttributes.PROMPT_TYPE.format(i=i): "text", + } + ) + + # Add speaker if it's from an agent + if "name" in msg: + 
attributes[MessageAttributes.PROMPT_SPEAKER.format(i=i)] = msg["name"] + + # Extract other parameters from kwargs if kwargs: - messages = kwargs.get("messages", []) - if messages: - for i, msg in enumerate(messages): - msg_attrs = { - "role": msg.get("role", "unknown"), - "content": msg.get("content", ""), - } + # Stop sequences + stop_sequences = kwargs.get("stop_sequences") + if stop_sequences: + attributes[SpanAttributes.LLM_REQUEST_STOP_SEQUENCES] = json.dumps(stop_sequences) + + # Response format + response_format = kwargs.get("response_format") + if response_format: + attributes["llm.request.response_format"] = json.dumps(response_format) + + # Tools + tools_to_call_from = kwargs.get("tools_to_call_from") + if tools_to_call_from: + tool_names = [tool.name for tool in tools_to_call_from] + attributes[SpanAttributes.LLM_REQUEST_FUNCTIONS] = json.dumps(tool_names) + + # Add detailed tool information + for i, tool in enumerate(tools_to_call_from): attributes.update( - _extract_attributes_from_mapping( - msg_attrs, - { - MessageAttributes.PROMPT_ROLE.format(i=i): "role", - MessageAttributes.PROMPT_CONTENT.format(i=i): "content", - }, - ) + { + MessageAttributes.TOOL_CALL_NAME.format(i=i): tool.name, + MessageAttributes.TOOL_CALL_DESCRIPTION.format(i=i): tool.description, + } ) - # Add response info if available - if return_value: - resp_attrs = { - "content": return_value.get("content", "") if isinstance(return_value, dict) else str(return_value), - } - attributes.update( - _extract_attributes_from_mapping( - resp_attrs, + # Handle response/return value + if return_value is not None: + if hasattr(return_value, "role"): + # ChatMessage object + attributes.update( { - MessageAttributes.COMPLETION_CONTENT.format(i=0): "content", - }, + MessageAttributes.COMPLETION_ROLE.format(i=0): return_value.role, + MessageAttributes.COMPLETION_CONTENT.format(i=0): return_value.content or "", + } ) - ) + + # Handle tool calls in response + if hasattr(return_value, "tool_calls") 
and return_value.tool_calls: + for j, tool_call in enumerate(return_value.tool_calls): + attributes.update( + { + MessageAttributes.COMPLETION_TOOL_CALL_ID.format(i=0, j=j): tool_call.id, + MessageAttributes.COMPLETION_TOOL_CALL_TYPE.format(i=0, j=j): tool_call.type, + MessageAttributes.COMPLETION_TOOL_CALL_NAME.format(i=0, j=j): tool_call.function.name, + MessageAttributes.COMPLETION_TOOL_CALL_ARGUMENTS.format(i=0, j=j): json.dumps( + tool_call.function.arguments + ), + } + ) + + # Token usage + if hasattr(return_value, "token_usage") and return_value.token_usage: + attributes.update( + { + SpanAttributes.LLM_USAGE_PROMPT_TOKENS: return_value.token_usage.input_tokens, + SpanAttributes.LLM_USAGE_COMPLETION_TOKENS: return_value.token_usage.output_tokens, + SpanAttributes.LLM_USAGE_TOTAL_TOKENS: ( + return_value.token_usage.input_tokens + return_value.token_usage.output_tokens + ), + } + ) + + # Response ID + if hasattr(return_value, "raw") and return_value.raw and hasattr(return_value.raw, "id"): + attributes[SpanAttributes.LLM_RESPONSE_ID] = return_value.raw.id + + elif isinstance(return_value, dict): + # Handle dict response + attributes[MessageAttributes.COMPLETION_CONTENT.format(i=0)] = json.dumps(return_value) return attributes @@ -91,47 +175,56 @@ def get_stream_attributes( attributes = get_common_attributes() # Extract model info from instance - if args and len(args) > 1: - instance = args[1] - model_id = instance.model_id if hasattr(instance, "model_id") else "unknown" + if args and len(args) > 0: + instance = args[0] + model_id = getattr(instance, "model_id", "unknown") + attributes.update( { - "gen_ai.model.id": model_id, - "gen_ai.model.name": model_id.split("/")[-1] if "/" in model_id else model_id, + SpanAttributes.LLM_REQUEST_MODEL: model_id, + SpanAttributes.LLM_SYSTEM: instance.__class__.__name__, + SpanAttributes.LLM_REQUEST_STREAMING: True, } ) - # Extract messages from kwargs - if kwargs: - messages = kwargs.get("messages", []) - if messages: - 
for i, msg in enumerate(messages): - msg_attrs = { - "role": msg.get("role", "unknown"), - "content": msg.get("content", ""), - } + # Extract messages from args/kwargs + messages = None + if args and len(args) > 1: + messages = args[1] + elif kwargs and "messages" in kwargs: + messages = kwargs["messages"] + + if messages: + # Process prompt messages (same as non-streaming) + for i, msg in enumerate(messages): + if isinstance(msg, dict): + role = msg.get("role", "unknown") + content = msg.get("content", "") + + # Handle content that might be a list + if isinstance(content, list): + text_content = "" + for item in content: + if isinstance(item, dict) and item.get("type") == "text": + text_content += item.get("text", "") + content = text_content + attributes.update( - _extract_attributes_from_mapping( - msg_attrs, - { - MessageAttributes.PROMPT_ROLE.format(i=i): "role", - MessageAttributes.PROMPT_CONTENT.format(i=i): "content", - }, - ) + { + MessageAttributes.PROMPT_ROLE.format(i=i): role, + MessageAttributes.PROMPT_CONTENT.format(i=i): content, + MessageAttributes.PROMPT_TYPE.format(i=i): "text", + } ) - # Add chunk info if available - if return_value: - chunk_attrs = { - "content": return_value.get("content", "") if isinstance(return_value, dict) else str(return_value), - } - attributes.update( - _extract_attributes_from_mapping( - chunk_attrs, - { - MessageAttributes.COMPLETION_CONTENT.format(i=0): "content", - }, - ) - ) + # Extract streaming-specific parameters + if kwargs: + stop_sequences = kwargs.get("stop_sequences") + if stop_sequences: + attributes[SpanAttributes.LLM_REQUEST_STOP_SEQUENCES] = json.dumps(stop_sequences) + + # Note: For streaming, the return_value is typically a generator + # Individual chunks would need to be tracked separately + attributes["llm.response.is_stream"] = True return attributes diff --git a/agentops/instrumentation/smolagents/instrumentor.py b/agentops/instrumentation/smolagents/instrumentor.py index eeedd3e0c..b0930fe35 
100644 --- a/agentops/instrumentation/smolagents/instrumentor.py +++ b/agentops/instrumentation/smolagents/instrumentor.py @@ -5,12 +5,124 @@ from opentelemetry.trace import get_tracer, SpanKind from agentops.instrumentation.common.wrappers import WrapConfig, wrap, unwrap -from agentops.instrumentation.smolagents import LIBRARY_VERSION -from agentops.instrumentation.smolagents.attributes.agent import ( - get_agent_attributes, - get_tool_call_attributes, - get_planning_step_attributes, -) + +# Define LIBRARY_VERSION directly to avoid circular import +LIBRARY_VERSION = "1.16.0" + +# Skip short-duration or redundant spans +SKIP_SHORT_DURATION_SPANS = True +MIN_SPAN_DURATION_MS = 100 # Skip spans shorter than 100ms + + +# Dynamic span naming functions +def get_agent_span_name(args, kwargs, instance=None): + """Generate dynamic span name for agent operations.""" + if not instance and args and len(args) > 0: + instance = args[0] + + if instance: + agent_type = instance.__class__.__name__.replace("Agent", "").lower() + task = kwargs.get("task", "") if kwargs else "" + if task and len(task) > 50: + task = task[:50] + "..." 
+ if task: + return f"agent.{agent_type}({task})" + return f"agent.{agent_type}.run" + return "agent.run" + + +def get_llm_span_name(args, kwargs, instance=None): + """Generate dynamic span name for LLM operations with model name.""" + if not instance and args and len(args) > 0: + instance = args[0] + + model_name = "unknown" + if instance: + if hasattr(instance, "model_id"): + model_name = instance.model_id + elif hasattr(instance, "__class__"): + model_name = instance.__class__.__name__ + + # Clean up model name for display + if model_name.startswith("openai/"): + model_name = model_name[7:] # Remove 'openai/' prefix + elif "/" in model_name: + model_name = model_name.split("/")[-1] # Take last part of path + + if kwargs and "stream" in kwargs and kwargs["stream"]: + return f"llm.generate_stream({model_name})" + return f"llm.generate({model_name})" + + +def get_tool_span_name(args, kwargs, instance=None): + """Generate dynamic span name for tool operations.""" + if not instance and args and len(args) > 0: + instance = args[0] + + tool_name = "unknown" + if instance: + if hasattr(instance, "name"): + tool_name = instance.name + elif hasattr(instance, "__class__"): + tool_name = instance.__class__.__name__ + + # If there's a tool_call argument, extract the tool name + if kwargs: + tool_call = kwargs.get("tool_call") + if tool_call and hasattr(tool_call, "name"): + tool_name = tool_call.name + elif tool_call and hasattr(tool_call, "function") and hasattr(tool_call.function, "name"): + tool_name = tool_call.function.name + + return f"tool.{tool_name}" + + +# Import attribute handlers +try: + from agentops.instrumentation.smolagents.attributes.agent import ( + get_agent_attributes, + get_tool_call_attributes, + get_planning_step_attributes, + get_agent_step_attributes, + get_agent_stream_attributes, + get_managed_agent_attributes, + ) + from agentops.instrumentation.smolagents.attributes.model import ( + get_model_attributes, + get_model_stream_attributes, + ) + from 
agentops.instrumentation.smolagents.stream_wrapper import SmoLAgentsStreamWrapper +except ImportError as e: + print(f"🖇 AgentOps: Error importing smolagents attributes: {e}") + + # Fallback functions + def get_agent_attributes(*args, **kwargs): + return {} + + def get_tool_call_attributes(*args, **kwargs): + return {} + + def get_planning_step_attributes(*args, **kwargs): + return {} + + def get_agent_step_attributes(*args, **kwargs): + return {} + + def get_agent_stream_attributes(*args, **kwargs): + return {} + + def get_managed_agent_attributes(*args, **kwargs): + return {} + + def get_model_attributes(*args, **kwargs): + return {} + + def get_model_stream_attributes(*args, **kwargs): + return {} + + class SmoLAgentsStreamWrapper: + def __init__(self, *args, **kwargs): + pass class SmoLAgentsInstrumentor(BaseInstrumentor): @@ -25,108 +137,135 @@ def instrumentation_dependencies(self) -> Collection[str]: return [] def _instrument(self, **kwargs): - """Instrument SmoLAgents. + """Instrument SmoLAgents library.""" + try: + import smolagents # noqa: F401 + except ImportError: + print("🖇 AgentOps: SmoLAgents not found - skipping instrumentation") + return - Args: - **kwargs: Instrumentation options - """ - tracer = get_tracer( - __name__, - LIBRARY_VERSION, - schema_url="https://opentelemetry.io/schemas/1.11.0", - ) + tracer = get_tracer(__name__, LIBRARY_VERSION) - # Instrument ToolCallingAgent + # ========================= + # Core agent instrumentation with improved naming + # ========================= + + # Instrument main agent run method - primary agent execution spans wrap( WrapConfig( - trace_name="tool_calling_agent.run", + trace_name=get_agent_span_name, package="smolagents.agents", - class_name="ToolCallingAgent", + class_name="MultiStepAgent", method_name="run", handler=get_agent_attributes, - span_kind=SpanKind.CLIENT, + span_kind=SpanKind.INTERNAL, ), tracer=tracer, ) - # Instrument CodeAgent + # Skip redundant agent.run_stream spans (they're typically 
very short) + # Only instrument if not already covered by .run method + + # ========================= + # Tool execution instrumentation with better naming + # ========================= + + # Primary tool execution spans with descriptive names wrap( WrapConfig( - trace_name="code_agent.run", + trace_name=get_tool_span_name, package="smolagents.agents", - class_name="CodeAgent", - method_name="run", - handler=get_agent_attributes, + class_name="ToolCallingAgent", + method_name="execute_tool_call", + handler=get_tool_call_attributes, span_kind=SpanKind.CLIENT, ), tracer=tracer, ) - # Instrument tool execution + # Skip redundant tool.execute spans (they add minimal value over execute_tool_call) + # The Tool.__call__ method creates very short spans that just wrap the actual execution + + # ========================= + # LLM instrumentation with model names + # ========================= + + # Primary LLM generation spans with model names in span title wrap( WrapConfig( - trace_name="tool.execute", - package="smolagents.tools", - class_name="Tool", - method_name="__call__", - handler=get_tool_call_attributes, + trace_name=get_llm_span_name, + package="smolagents.models", + class_name="LiteLLMModel", + method_name="generate", + handler=get_model_attributes, span_kind=SpanKind.CLIENT, ), tracer=tracer, ) - # Instrument planning steps + # LLM streaming with model names wrap( WrapConfig( - trace_name="tool_calling_agent.plan", - package="smolagents.agents", - class_name="ToolCallingAgent", - method_name="_generate_planning_step", - handler=get_planning_step_attributes, + trace_name=get_llm_span_name, + package="smolagents.models", + class_name="LiteLLMModel", + method_name="generate_stream", + handler=get_model_stream_attributes, span_kind=SpanKind.CLIENT, ), tracer=tracer, ) - def _uninstrument(self, **kwargs): - """Remove SmoLAgents instrumentation. 
+ # ========================= + # Agent step instrumentation (selective) + # ========================= - Args: - **kwargs: Uninstrumentation options - """ - unwrap( - WrapConfig( - trace_name="tool_calling_agent.run", - package="smolagents.agents", - class_name="ToolCallingAgent", - method_name="run", - handler=get_agent_attributes, - ) - ) - unwrap( + # Only instrument step execution if it provides meaningful context + # Skip very short step_stream spans that just wrap other operations + wrap( WrapConfig( - trace_name="code_agent.run", + trace_name=lambda args, kwargs: f"agent.step_{kwargs.get('step_number', 'unknown')}", package="smolagents.agents", - class_name="CodeAgent", - method_name="run", - handler=get_agent_attributes, - ) - ) - unwrap( - WrapConfig( - trace_name="tool.execute", - package="smolagents.tools", - class_name="Tool", - method_name="__call__", - handler=get_tool_call_attributes, - ) + class_name="MultiStepAgent", + method_name="step", + handler=get_agent_step_attributes, + span_kind=SpanKind.INTERNAL, + ), + tracer=tracer, ) - unwrap( + + # ========================= + # Managed agent instrumentation + # ========================= + + # For multi-agent workflows + wrap( WrapConfig( - trace_name="tool_calling_agent.plan", + trace_name=lambda args, kwargs: f"agent.managed_call({kwargs.get('agent_name', 'unknown')})", package="smolagents.agents", - class_name="ToolCallingAgent", - method_name="_generate_planning_step", - handler=get_planning_step_attributes, - ) + class_name="MultiStepAgent", + method_name="managed_call", + handler=get_managed_agent_attributes, + span_kind=SpanKind.INTERNAL, + ), + tracer=tracer, ) + + # Note: Removed memory instrumentation due to class structure differences + # in smolagents.memory module + + def _uninstrument(self, **kwargs): + """Uninstrument SmoLAgents. 
+ + Args: + **kwargs: Uninstrumentation options + """ + # Uninstrument agent methods + unwrap("smolagents.agents", "MultiStepAgent.run") + unwrap("smolagents.agents", "MultiStepAgent._generate_planning_step") + unwrap("smolagents.agents", "ToolCallingAgent.execute_tool_call") + unwrap("smolagents.agents", "MultiStepAgent.__call__") + + # Uninstrument model methods + unwrap("smolagents.models", "LiteLLMModel.generate") + unwrap("smolagents.models", "LiteLLMModel.generate_stream") diff --git a/agentops/instrumentation/smolagents/stream_wrapper.py b/agentops/instrumentation/smolagents/stream_wrapper.py index fa1768404..fb33a5473 100644 --- a/agentops/instrumentation/smolagents/stream_wrapper.py +++ b/agentops/instrumentation/smolagents/stream_wrapper.py @@ -2,13 +2,14 @@ import time import uuid - -from opentelemetry.trace import Status, StatusCode +from typing import Any, Generator, Optional +from opentelemetry.trace import Status, StatusCode, Span from agentops.semconv.message import MessageAttributes from agentops.semconv.agent import AgentAttributes from agentops.semconv.tool import ToolAttributes from .attributes.model import get_stream_attributes +from agentops.semconv.span_attributes import SpanAttributes def model_stream_wrapper(tracer): @@ -123,3 +124,135 @@ def wrapper(wrapped, instance, args, kwargs): raise return wrapper + + +class SmoLAgentsStreamWrapper: + """Wrapper for streaming responses from SmoLAgents models.""" + + def __init__( + self, + stream: Generator, + span: Span, + model_id: Optional[str] = None, + ): + """Initialize the stream wrapper. 
+ + Args: + stream: The original generator from the model + span: The OpenTelemetry span to track the stream + model_id: Optional model identifier + """ + self._stream = stream + self._span = span + self._model_id = model_id + self._chunks_received = 0 + self._full_content = [] + self._tool_calls = [] + self._current_tool_call = None + self._token_count = 0 + + def __iter__(self): + """Iterate over the stream.""" + return self + + def __next__(self): + """Get the next chunk from the stream.""" + try: + chunk = next(self._stream) + self._process_chunk(chunk) + return chunk + except StopIteration: + self._finalize_stream() + raise + + def _process_chunk(self, chunk: Any) -> None: + """Process a chunk from the stream. + + Args: + chunk: The chunk to process + """ + self._chunks_received += 1 + + # Handle ChatMessageStreamDelta objects + if hasattr(chunk, "content") and chunk.content: + self._full_content.append(chunk.content) + + # Handle tool calls in chunks + if hasattr(chunk, "tool_calls") and chunk.tool_calls: + for tool_call in chunk.tool_calls: + if tool_call.id not in [tc["id"] for tc in self._tool_calls]: + self._tool_calls.append( + { + "id": tool_call.id, + "type": tool_call.type, + "name": tool_call.function.name, + "arguments": tool_call.function.arguments, + } + ) + + # Track token usage if available + if hasattr(chunk, "token_usage") and chunk.token_usage: + if hasattr(chunk.token_usage, "output_tokens"): + self._token_count += chunk.token_usage.output_tokens + + # Update span with chunk information + self._span.add_event( + "stream_chunk_received", + { + "chunk_number": self._chunks_received, + "chunk_content_length": len(chunk.content) if hasattr(chunk, "content") and chunk.content else 0, + }, + ) + + def _finalize_stream(self) -> None: + """Finalize the stream and update span attributes.""" + # Combine all content chunks + full_content = "".join(self._full_content) + + # Set final attributes on the span + attributes = { + 
MessageAttributes.COMPLETION_CONTENT.format(i=0): full_content, + "stream.chunks_received": self._chunks_received, + "stream.total_content_length": len(full_content), + } + + # Add tool calls if any + if self._tool_calls: + for j, tool_call in enumerate(self._tool_calls): + attributes.update( + { + MessageAttributes.COMPLETION_TOOL_CALL_ID.format(i=0, j=j): tool_call["id"], + MessageAttributes.COMPLETION_TOOL_CALL_TYPE.format(i=0, j=j): tool_call["type"], + MessageAttributes.COMPLETION_TOOL_CALL_NAME.format(i=0, j=j): tool_call["name"], + MessageAttributes.COMPLETION_TOOL_CALL_ARGUMENTS.format(i=0, j=j): str(tool_call["arguments"]), + } + ) + + # Add token usage if tracked + if self._token_count > 0: + attributes[SpanAttributes.LLM_USAGE_STREAMING_TOKENS] = self._token_count + + self._span.set_attributes(attributes) + + def close(self) -> None: + """Close the stream wrapper.""" + if hasattr(self._stream, "close"): + self._stream.close() + + +def wrap_stream( + stream: Generator, + span: Span, + model_id: Optional[str] = None, +) -> SmoLAgentsStreamWrapper: + """Wrap a streaming response from a SmoLAgents model. + + Args: + stream: The original generator from the model + span: The OpenTelemetry span to track the stream + model_id: Optional model identifier + + Returns: + SmoLAgentsStreamWrapper: The wrapped stream + """ + return SmoLAgentsStreamWrapper(stream, span, model_id) From 70c420465c819ed91631b13da760fe8549ad7386 Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Fri, 30 May 2025 21:11:02 +0530 Subject: [PATCH 04/13] Refactor multi_smolagents_system notebook to streamline agent management. Removed ManagedAgent and adjusted CodeAgent to directly manage web_agent, enhancing clarity and simplifying the agent structure. 
--- .../smolagents_examples/multi_smolagents_system.ipynb | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/examples/smolagents_examples/multi_smolagents_system.ipynb b/examples/smolagents_examples/multi_smolagents_system.ipynb index cc2806e06..41d18bcf1 100644 --- a/examples/smolagents_examples/multi_smolagents_system.ipynb +++ b/examples/smolagents_examples/multi_smolagents_system.ipynb @@ -188,18 +188,12 @@ "from smolagents import (\n", " CodeAgent,\n", " ToolCallingAgent,\n", - " ManagedAgent,\n", " DuckDuckGoSearchTool,\n", ")\n", "\n", "web_agent = ToolCallingAgent(\n", " tools=[DuckDuckGoSearchTool(), visit_webpage],\n", " model=model,\n", - " max_iterations=10,\n", - ")\n", - "\n", - "managed_web_agent = ManagedAgent(\n", - " agent=web_agent,\n", " name=\"search\",\n", " description=\"Runs web searches for you. Give it your query as an argument.\",\n", ")\n", @@ -207,7 +201,7 @@ "manager_agent = CodeAgent(\n", " tools=[],\n", " model=model,\n", - " managed_agents=[managed_web_agent],\n", + " managed_agents=[web_agent],\n", " additional_authorized_imports=[\"time\", \"numpy\", \"pandas\"],\n", ")" ] From 67d508d502c500f9311c64b5556e8e3c71d3ad1c Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Fri, 30 May 2025 21:13:56 +0530 Subject: [PATCH 05/13] Add a blank line for improved readability in `__init__.py` --- agentops/sdk/decorators/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/agentops/sdk/decorators/__init__.py b/agentops/sdk/decorators/__init__.py index f775b45d5..608b23908 100644 --- a/agentops/sdk/decorators/__init__.py +++ b/agentops/sdk/decorators/__init__.py @@ -20,6 +20,7 @@ tool = create_entity_decorator(SpanKind.TOOL) operation = task + # For backward compatibility: @session decorator calls @trace decorator @functools.wraps(trace) def session(*args, **kwargs): From c4ae09bd4764a1818e618b47db1f23583c0e62ba Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Sat, 31 May 2025 00:42:03 +0530 Subject: [PATCH 06/13] 
Refactor SmoLAgents instrumentation for improved clarity and consistency. Updated class names and removed unused dynamic span naming functions. Simplified attribute extraction for agents and models, enhancing observability in telemetry data. --- agentops/instrumentation/__init__.py | 2 +- agentops/instrumentation/common/wrappers.py | 38 +- .../instrumentation/smolagents/__init__.py | 4 +- .../smolagents/attributes/agent.py | 510 ++++++++---------- .../smolagents/attributes/model.py | 341 ++++++------ .../smolagents/instrumentor.py | 391 +++++++------- 6 files changed, 577 insertions(+), 709 deletions(-) diff --git a/agentops/instrumentation/__init__.py b/agentops/instrumentation/__init__.py index 9c5bf1d34..4fa7fba9d 100644 --- a/agentops/instrumentation/__init__.py +++ b/agentops/instrumentation/__init__.py @@ -210,7 +210,7 @@ class InstrumentorConfig(TypedDict): }, "smolagents": { "module_name": "agentops.instrumentation.smolagents", - "class_name": "SmoLAgentsInstrumentor", + "class_name": "SmolAgentsInstrumentor", "min_version": "1.8.0", }, } diff --git a/agentops/instrumentation/common/wrappers.py b/agentops/instrumentation/common/wrappers.py index ac3c551b1..c1127b8f9 100644 --- a/agentops/instrumentation/common/wrappers.py +++ b/agentops/instrumentation/common/wrappers.py @@ -6,7 +6,7 @@ spans with attributes, and functions for creating and applying wrappers. """ -from typing import Any, Optional, Tuple, Dict, Callable, Union +from typing import Any, Optional, Tuple, Dict, Callable from dataclasses import dataclass import logging from wrapt import wrap_function_wrapper # type: ignore @@ -21,7 +21,6 @@ logger = logging.getLogger(__name__) AttributeHandler = Callable[[Optional[Tuple], Optional[Dict], Optional[Any]], AttributeMap] -DynamicTraceName = Callable[[Optional[Tuple], Optional[Dict], Optional[Any]], str] @dataclass @@ -33,7 +32,7 @@ class WrapConfig: to set, and how to name the resulting trace spans. 
Attributes: - trace_name: The name to use for the trace span (can be string or callable) + trace_name: The name to use for the trace span package: The package containing the target class class_name: The name of the class containing the method method_name: The name of the method to wrap @@ -44,7 +43,7 @@ class WrapConfig: span_kind: The kind of span to create (default: CLIENT) """ - trace_name: Union[str, DynamicTraceName] + trace_name: str package: str class_name: str method_name: str @@ -56,33 +55,6 @@ def __repr__(self): return f"{self.package}.{self.class_name}.{self.method_name}" -def _get_span_name( - trace_name: Union[str, DynamicTraceName], - args: Optional[Tuple], - kwargs: Optional[Dict], - instance: Optional[Any] = None, -) -> str: - """Get the span name, handling both static strings and dynamic functions. - - Args: - trace_name: The trace name (string or callable) - args: Optional tuple of positional arguments - kwargs: Optional dict of keyword arguments - instance: Optional instance object - - Returns: - The span name to use - """ - if callable(trace_name): - try: - return trace_name(args, kwargs, instance) - except Exception as e: - logger.warning(f"Failed to generate dynamic span name: {e}") - return "unknown_operation" - else: - return trace_name - - def _update_span(span: Span, attributes: AttributeMap) -> None: """Update a span with the provided attributes. 
@@ -142,7 +114,7 @@ async def awrapper(wrapped, instance, args, kwargs): return_value = None with tracer.start_as_current_span( - _get_span_name(wrap_config.trace_name, args, kwargs, instance), + wrap_config.trace_name, kind=wrap_config.span_kind, ) as span: try: @@ -174,7 +146,7 @@ def wrapper(wrapped, instance, args, kwargs): return_value = None with tracer.start_as_current_span( - _get_span_name(wrap_config.trace_name, args, kwargs, instance), + wrap_config.trace_name, kind=wrap_config.span_kind, ) as span: try: diff --git a/agentops/instrumentation/smolagents/__init__.py b/agentops/instrumentation/smolagents/__init__.py index 46e6831dc..7eeda90f7 100644 --- a/agentops/instrumentation/smolagents/__init__.py +++ b/agentops/instrumentation/smolagents/__init__.py @@ -3,6 +3,6 @@ LIBRARY_NAME = "smolagents" LIBRARY_VERSION = "1.16.0" -from agentops.instrumentation.smolagents.instrumentor import SmoLAgentsInstrumentor # noqa: E402 +from agentops.instrumentation.smolagents.instrumentor import SmolAgentsInstrumentor # noqa: E402 -__all__ = ["SmoLAgentsInstrumentor"] +__all__ = ["SmolAgentsInstrumentor"] diff --git a/agentops/instrumentation/smolagents/attributes/agent.py b/agentops/instrumentation/smolagents/attributes/agent.py index 71e5e7c15..d0f053a54 100644 --- a/agentops/instrumentation/smolagents/attributes/agent.py +++ b/agentops/instrumentation/smolagents/attributes/agent.py @@ -7,8 +7,6 @@ from agentops.instrumentation.common.attributes import get_common_attributes from agentops.semconv.agent import AgentAttributes from agentops.semconv.tool import ToolAttributes -from agentops.semconv.message import MessageAttributes -from agentops.semconv.span_attributes import SpanAttributes def get_agent_attributes( @@ -16,91 +14,79 @@ def get_agent_attributes( kwargs: Optional[Dict] = None, return_value: Optional[Any] = None, ) -> Dict[str, Any]: - """Extract attributes from an agent execution. + """Extract attributes from an agent execution call. 
Args: args: Optional tuple of positional arguments kwargs: Optional dict of keyword arguments - return_value: Optional return value from the wrapped function + return_value: Optional return value from the function Returns: - Dict containing extracted attributes + Dictionary of extracted attributes """ attributes = get_common_attributes() - # Extract agent info from instance - if args and len(args) > 0: - instance = args[0] - agent_type = instance.__class__.__name__ - - # Get tools and managed agents - tools = [] - if hasattr(instance, "tools") and instance.tools: - tools = list(instance.tools.keys()) - managed_agents = [] - if hasattr(instance, "managed_agents") and instance.managed_agents: - managed_agents = list(instance.managed_agents.keys()) - - # Get model info - model_id = None - if hasattr(instance, "model") and hasattr(instance.model, "model_id"): - model_id = instance.model.model_id - - attributes.update( - { - AgentAttributes.AGENT_ID: str(uuid.uuid4()), - AgentAttributes.AGENT_NAME: agent_type, - AgentAttributes.AGENT_ROLE: "executor", - AgentAttributes.AGENT_TOOLS: tools, - "agent.managed_agents": managed_agents, - } - ) - - # Only add attributes if they have non-None values - max_steps = getattr(instance, "max_steps", None) - if max_steps is not None: - attributes["agent.max_steps"] = max_steps - - planning_interval = getattr(instance, "planning_interval", None) - if planning_interval is not None: - attributes["agent.planning_interval"] = planning_interval - - if model_id: - attributes[SpanAttributes.LLM_REQUEST_MODEL] = model_id - - # Extract task from kwargs or args - if kwargs: - task = kwargs.get("task") - stream = kwargs.get("stream", False) - reset = kwargs.get("reset", True) - max_steps = kwargs.get("max_steps") - additional_args = kwargs.get("additional_args") - - if task: - attributes[AgentAttributes.AGENT_REASONING] = task - attributes["agent.stream_mode"] = stream - attributes["agent.reset"] = reset - if max_steps is not None: - 
attributes["agent.max_steps_override"] = max_steps - if additional_args: - attributes["agent.additional_args"] = json.dumps(additional_args) - elif args and len(args) > 1: - attributes[AgentAttributes.AGENT_REASONING] = args[1] - - # Handle return value for full result mode - if return_value is not None: - if hasattr(return_value, "output"): - # RunResult object - attributes[SpanAttributes.AGENTOPS_ENTITY_OUTPUT] = str(return_value.output) - attributes["agent.result_state"] = return_value.state - if return_value.token_usage: - attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = return_value.token_usage.input_tokens - attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = return_value.token_usage.output_tokens - attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = ( - return_value.token_usage.input_tokens + return_value.token_usage.output_tokens - ) - else: - attributes[SpanAttributes.AGENTOPS_ENTITY_OUTPUT] = str(return_value) + try: + # Extract agent instance information + agent_instance = None + if args and len(args) > 0: + agent_instance = args[0] + elif kwargs and "self" in kwargs: + agent_instance = kwargs["self"] + + if agent_instance: + # Extract agent name + agent_name = getattr(agent_instance, "name", agent_instance.__class__.__name__) + attributes[AgentAttributes.AGENT_NAME] = agent_name + + # Generate agent ID if not present + agent_id = getattr(agent_instance, "id", str(uuid.uuid4())) + attributes[AgentAttributes.AGENT_ID] = agent_id + + # Extract agent role/type + attributes[AgentAttributes.AGENT_ROLE] = "executor" + + # Extract tools information + tools = getattr(agent_instance, "tools", []) + if tools: + tool_names = [] + for tool in tools: + tool_name = getattr(tool, "name", str(tool)) + tool_names.append(tool_name) + attributes[AgentAttributes.AGENT_TOOLS] = json.dumps(tool_names) + else: + attributes[AgentAttributes.AGENT_TOOLS] = "[]" + + # Extract managed agents information + managed_agents = getattr(agent_instance, "managed_agents", []) + if 
managed_agents: + managed_agent_names = [] + for managed_agent in managed_agents: + agent_name = getattr(managed_agent, "name", managed_agent.__class__.__name__) + managed_agent_names.append(agent_name) + attributes[AgentAttributes.AGENT_MANAGED_AGENTS] = json.dumps(managed_agent_names) + else: + attributes[AgentAttributes.AGENT_MANAGED_AGENTS] = "[]" + + # Extract input/task from args or kwargs + task_input = None + if args and len(args) > 1: + task_input = args[1] + elif kwargs and "task" in kwargs: + task_input = kwargs["task"] + elif kwargs and "prompt" in kwargs: + task_input = kwargs["prompt"] + + if task_input: + attributes["agent.task"] = str(task_input) + + # Extract return value/output + if return_value is not None: + attributes["agentops.entity.output"] = str(return_value) + + except Exception: + # If extraction fails, continue with basic attributes + pass return attributes @@ -110,40 +96,42 @@ def get_agent_stream_attributes( kwargs: Optional[Dict] = None, return_value: Optional[Any] = None, ) -> Dict[str, Any]: - """Extract attributes from agent streaming execution. + """Extract attributes from an agent streaming call. 
Args: args: Optional tuple of positional arguments kwargs: Optional dict of keyword arguments - return_value: Optional return value from the wrapped function + return_value: Optional return value from the function Returns: - Dict containing extracted attributes + Dictionary of extracted attributes """ attributes = get_common_attributes() - # Extract agent info from instance - if args and len(args) > 0: - instance = args[0] - agent_type = instance.__class__.__name__ + try: + # Extract reasoning/task information + if kwargs: + if "max_steps" in kwargs: + attributes["agent.max_steps"] = str(kwargs["max_steps"]) - attributes.update( - { - AgentAttributes.AGENT_NAME: agent_type, - AgentAttributes.AGENT_ROLE: "executor", - "agent.stream_mode": True, - } - ) + # Extract task/reasoning from various parameter names + task_info = None + for param_name in ["task", "prompt", "reasoning", "query"]: + if param_name in kwargs: + task_info = kwargs[param_name] + break - # Extract task and parameters - if kwargs: - task = kwargs.get("task") - max_steps = kwargs.get("max_steps") + if task_info: + attributes["agent.reasoning"] = str(task_info) - if task: - attributes[AgentAttributes.AGENT_REASONING] = task - if max_steps: - attributes["agent.max_steps"] = max_steps + # Extract from args + if args and len(args) > 1: + task_info = args[1] + attributes["agent.reasoning"] = str(task_info) + + except Exception: + # If extraction fails, continue with basic attributes + pass return attributes @@ -158,48 +146,30 @@ def get_agent_step_attributes( Args: args: Optional tuple of positional arguments kwargs: Optional dict of keyword arguments - return_value: Optional return value from the wrapped function + return_value: Optional return value from the function Returns: - Dict containing extracted attributes + Dictionary of extracted attributes """ attributes = get_common_attributes() - # Extract agent info from instance - if args and len(args) > 0: - instance = args[0] - agent_type = 
instance.__class__.__name__ - - attributes.update( - { - AgentAttributes.AGENT_NAME: agent_type, - "agent.step_number": getattr(instance, "step_number", 0), - } - ) - - # Extract memory step info - if args and len(args) > 1: - memory_step = args[1] - if hasattr(memory_step, "step_number"): - attributes["step.number"] = memory_step.step_number - if hasattr(memory_step, "tool_calls") and memory_step.tool_calls: - # Extract tool call info - for i, tool_call in enumerate(memory_step.tool_calls): - attributes.update( - { - MessageAttributes.TOOL_CALL_ID.format(i=i): tool_call.id, - MessageAttributes.TOOL_CALL_NAME.format(i=i): tool_call.name, - MessageAttributes.TOOL_CALL_ARGUMENTS.format(i=i): json.dumps(tool_call.arguments), - } - ) - if hasattr(memory_step, "error") and memory_step.error: - attributes["step.error"] = str(memory_step.error) - if hasattr(memory_step, "observations"): - attributes["step.observations"] = str(memory_step.observations) - - # Handle return value - if return_value is not None: - attributes[SpanAttributes.AGENTOPS_ENTITY_OUTPUT] = str(return_value) + try: + # Try to extract step information + step_number = getattr(args[0] if args else None, "step_count", None) + if step_number is not None: + attributes["agent.step_number"] = str(step_number) + + # Extract step name/type + step_name = "ActionStep" # Default for smolagents + attributes["agent.name"] = step_name + + # Extract return value + if return_value is not None: + attributes["agentops.entity.output"] = str(return_value) + + except Exception: + # If extraction fails, continue with basic attributes + pass return attributes @@ -209,90 +179,77 @@ def get_tool_call_attributes( kwargs: Optional[Dict] = None, return_value: Optional[Any] = None, ) -> Dict[str, Any]: - """Extract attributes from a tool call. + """Extract attributes from a tool call execution. 
Args: args: Optional tuple of positional arguments kwargs: Optional dict of keyword arguments - return_value: Optional return value from the wrapped function + return_value: Optional return value from the function Returns: - Dict containing extracted attributes + Dictionary of extracted attributes """ attributes = get_common_attributes() - # Extract tool call information - tool_call = None - if kwargs and "tool_call" in kwargs: - tool_call = kwargs["tool_call"] - elif args and len(args) > 1: - tool_call = args[1] - - if tool_call: - # Extract tool call details + try: + # Generate tool execution ID tool_id = str(uuid.uuid4()) + attributes[ToolAttributes.TOOL_ID] = tool_id + + # Extract tool information from various sources tool_name = "unknown" - tool_arguments = {} - - if hasattr(tool_call, "id"): - tool_id = tool_call.id - if hasattr(tool_call, "name"): - tool_name = tool_call.name - elif hasattr(tool_call, "function") and hasattr(tool_call.function, "name"): - tool_name = tool_call.function.name - - if hasattr(tool_call, "arguments"): - tool_arguments = tool_call.arguments - elif hasattr(tool_call, "function") and hasattr(tool_call.function, "arguments"): - try: - tool_arguments = ( - json.loads(tool_call.function.arguments) - if isinstance(tool_call.function.arguments, str) - else tool_call.function.arguments - ) - except (json.JSONDecodeError, TypeError): - tool_arguments = {"raw": str(tool_call.function.arguments)} - - attributes.update( - { - ToolAttributes.TOOL_ID: tool_id, - ToolAttributes.TOOL_NAME: tool_name, - ToolAttributes.TOOL_PARAMETERS: json.dumps(tool_arguments), - ToolAttributes.TOOL_STATUS: "pending", - ToolAttributes.TOOL_DESCRIPTION: "unknown", - } - ) - - # Extract instance information for Tool.__call__ style calls - if args and len(args) > 0: - instance = args[0] - if hasattr(instance, "__class__") and instance.__class__.__name__ in ["Tool", "DuckDuckGoSearchTool"]: - tool_name = getattr(instance, "name", instance.__class__.__name__) - 
tool_description = getattr(instance, "description", "unknown") - - # Update attributes with instance info - attributes.update( - { - ToolAttributes.TOOL_NAME: tool_name, - ToolAttributes.TOOL_DESCRIPTION: tool_description, - } - ) - - # If there are additional args, they might be tool inputs - if len(args) > 1: - tool_inputs = {} - for i, arg in enumerate(args[1:], 1): - tool_inputs[f"arg_{i}"] = str(arg) - attributes[ToolAttributes.TOOL_PARAMETERS] = json.dumps(tool_inputs) - - # Handle return value - if return_value is not None: - attributes[ToolAttributes.TOOL_STATUS] = "success" - # Store the result if it's not too large - result_str = str(return_value) - if len(result_str) > 1000: - result_str = result_str[:1000] + "..." - attributes[ToolAttributes.TOOL_RESULT] = result_str + tool_description = "unknown" + tool_parameters = "{}" + + # Try to extract from instance (first arg) + if args and len(args) > 0: + instance = args[0] + if hasattr(instance, "name"): + tool_name = instance.name + if hasattr(instance, "description"): + tool_description = instance.description + + # Try to extract from kwargs + if kwargs: + if "tool_call" in kwargs: + tool_call = kwargs["tool_call"] + if hasattr(tool_call, "function"): + tool_name = tool_call.function.name + if hasattr(tool_call.function, "arguments"): + tool_parameters = tool_call.function.arguments + elif "name" in kwargs: + tool_name = kwargs["name"] + elif "function_name" in kwargs: + tool_name = kwargs["function_name"] + + # Extract parameters + if "parameters" in kwargs: + tool_parameters = json.dumps(kwargs["parameters"]) + elif "arguments" in kwargs: + tool_parameters = json.dumps(kwargs["arguments"]) + elif "args" in kwargs: + tool_parameters = json.dumps(kwargs["args"]) + + # Set tool attributes + attributes[ToolAttributes.TOOL_NAME] = tool_name + attributes[ToolAttributes.TOOL_DESCRIPTION] = tool_description + attributes[ToolAttributes.TOOL_PARAMETERS] = tool_parameters + attributes[ToolAttributes.TOOL_STATUS] = 
"pending" + attributes[ToolAttributes.TOOL_OUTPUT_TYPE] = "unknown" + attributes[ToolAttributes.TOOL_INPUTS] = "{}" + + # Extract return value + if return_value is not None: + attributes["tool.result"] = str(return_value) + attributes[ToolAttributes.TOOL_STATUS] = "success" + + except Exception: + # If extraction fails, set basic attributes + attributes[ToolAttributes.TOOL_NAME] = "unknown" + attributes[ToolAttributes.TOOL_DESCRIPTION] = "unknown" + attributes[ToolAttributes.TOOL_ID] = str(uuid.uuid4()) + attributes[ToolAttributes.TOOL_PARAMETERS] = "{}" + attributes[ToolAttributes.TOOL_STATUS] = "pending" return attributes @@ -302,51 +259,34 @@ def get_planning_step_attributes( kwargs: Optional[Dict] = None, return_value: Optional[Any] = None, ) -> Dict[str, Any]: - """Extract attributes from a planning step. + """Extract attributes from a planning step execution. Args: args: Optional tuple of positional arguments kwargs: Optional dict of keyword arguments - return_value: Optional return value from the wrapped function + return_value: Optional return value from the function Returns: - Dict containing extracted attributes + Dictionary of extracted attributes """ attributes = get_common_attributes() - # Extract agent info from instance - if args and len(args) > 0: - instance = args[0] - agent_type = instance.__class__.__name__ - - attributes.update( - { - AgentAttributes.AGENT_NAME: agent_type, - "planning.agent_type": agent_type, - } - ) - - # Extract planning step info from args - if args and len(args) > 1: - task = args[1] - attributes[AgentAttributes.AGENT_REASONING] = task - - # Extract kwargs - if kwargs: - is_first_step = kwargs.get("is_first_step", False) - step = kwargs.get("step", 0) - - attributes.update( - { - "planning.is_first_step": is_first_step, - "planning.step_number": step, - } - ) - - # Handle generator return value - if return_value is not None: - # The return value is typically a generator for planning steps - attributes["planning.status"] = 
"completed" + try: + # Extract planning information + if kwargs: + if "planning_step" in kwargs: + step = kwargs["planning_step"] + attributes["agent.planning.step"] = str(step) + if "reasoning" in kwargs: + attributes["agent.planning.reasoning"] = str(kwargs["reasoning"]) + + # Extract return value + if return_value is not None: + attributes["agentops.entity.output"] = str(return_value) + + except Exception: + # If extraction fails, continue with basic attributes + pass return attributes @@ -361,44 +301,54 @@ def get_managed_agent_attributes( Args: args: Optional tuple of positional arguments kwargs: Optional dict of keyword arguments - return_value: Optional return value from the wrapped function + return_value: Optional return value from the function Returns: - Dict containing extracted attributes + Dictionary of extracted attributes """ attributes = get_common_attributes() - # Extract agent info from instance - if args and len(args) > 0: - instance = args[0] - agent_type = instance.__class__.__name__ - agent_name = getattr(instance, "name", agent_type) - agent_description = getattr(instance, "description", "") - - attributes.update( - { - AgentAttributes.AGENT_ID: str(uuid.uuid4()), - AgentAttributes.AGENT_NAME: agent_name, - AgentAttributes.AGENT_ROLE: "managed", - "agent.type": agent_type, - "agent.description": agent_description, - "agent.provide_run_summary": getattr(instance, "provide_run_summary", False), - } - ) - - # Extract task - if args and len(args) > 1: - task = args[1] - attributes[AgentAttributes.AGENT_REASONING] = task - elif kwargs and "task" in kwargs: - attributes[AgentAttributes.AGENT_REASONING] = kwargs["task"] - - # Handle return value - if return_value is not None: - if isinstance(return_value, dict): - # Managed agent typically returns a dict with task and output - attributes[SpanAttributes.AGENTOPS_ENTITY_OUTPUT] = json.dumps(return_value) - else: - attributes[SpanAttributes.AGENTOPS_ENTITY_OUTPUT] = str(return_value) + try: + # Extract 
managed agent information + agent_instance = None + if args and len(args) > 0: + agent_instance = args[0] + elif kwargs and "agent" in kwargs: + agent_instance = kwargs["agent"] + + if agent_instance: + # Extract agent details + agent_name = getattr(agent_instance, "name", agent_instance.__class__.__name__) + agent_id = getattr(agent_instance, "id", str(uuid.uuid4())) + agent_description = getattr(agent_instance, "description", "") + + attributes[AgentAttributes.AGENT_NAME] = agent_name + attributes[AgentAttributes.AGENT_ID] = agent_id + attributes[AgentAttributes.AGENT_ROLE] = "managed" + attributes[AgentAttributes.AGENT_TYPE] = agent_instance.__class__.__name__ + + if agent_description: + attributes[AgentAttributes.AGENT_DESCRIPTION] = agent_description + + # Check if this agent provides run summaries + attributes["agent.provide_run_summary"] = "false" # Default for smolagents + + # Extract task information + task = None + if args and len(args) > 1: + task = args[1] + elif kwargs and "task" in kwargs: + task = kwargs["task"] + + if task: + attributes["agent.task"] = str(task) + + # Extract return value + if return_value is not None: + attributes["agentops.entity.output"] = str(return_value) + + except Exception: + # If extraction fails, continue with basic attributes + pass return attributes diff --git a/agentops/instrumentation/smolagents/attributes/model.py b/agentops/instrumentation/smolagents/attributes/model.py index 0f02f9685..15513babf 100644 --- a/agentops/instrumentation/smolagents/attributes/model.py +++ b/agentops/instrumentation/smolagents/attributes/model.py @@ -20,139 +20,121 @@ def get_model_attributes( Args: args: Optional tuple of positional arguments kwargs: Optional dict of keyword arguments - return_value: Optional return value from the wrapped function + return_value: Optional return value from the function Returns: - Dict containing extracted attributes + Dictionary of extracted attributes """ attributes = get_common_attributes() - # Extract 
model info from instance - if args and len(args) > 0: - instance = args[0] - model_id = getattr(instance, "model_id", "unknown") - - attributes.update( - { - SpanAttributes.LLM_REQUEST_MODEL: model_id, - SpanAttributes.LLM_SYSTEM: instance.__class__.__name__, - } - ) - - # Extract model-specific attributes - if hasattr(instance, "temperature"): - attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] = instance.temperature - if hasattr(instance, "max_tokens"): - attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] = instance.max_tokens - if hasattr(instance, "api_base"): - attributes[SpanAttributes.LLM_OPENAI_API_BASE] = instance.api_base - - # Extract messages from args/kwargs - messages = None - if args and len(args) > 1: - messages = args[1] - elif kwargs and "messages" in kwargs: - messages = kwargs["messages"] - - if messages: - # Process prompt messages - for i, msg in enumerate(messages): - # Handle different message formats - if isinstance(msg, dict): - role = msg.get("role", "unknown") - content = msg.get("content", "") - - # Handle content that might be a list (for multimodal) - if isinstance(content, list): - text_content = "" - for item in content: - if isinstance(item, dict) and item.get("type") == "text": - text_content += item.get("text", "") - content = text_content - - attributes.update( - { - MessageAttributes.PROMPT_ROLE.format(i=i): role, - MessageAttributes.PROMPT_CONTENT.format(i=i): content, - MessageAttributes.PROMPT_TYPE.format(i=i): "text", - } - ) - - # Add speaker if it's from an agent - if "name" in msg: - attributes[MessageAttributes.PROMPT_SPEAKER.format(i=i)] = msg["name"] - - # Extract other parameters from kwargs - if kwargs: - # Stop sequences - stop_sequences = kwargs.get("stop_sequences") - if stop_sequences: - attributes[SpanAttributes.LLM_REQUEST_STOP_SEQUENCES] = json.dumps(stop_sequences) - - # Response format - response_format = kwargs.get("response_format") - if response_format: - attributes["llm.request.response_format"] = 
json.dumps(response_format) - - # Tools - tools_to_call_from = kwargs.get("tools_to_call_from") - if tools_to_call_from: - tool_names = [tool.name for tool in tools_to_call_from] - attributes[SpanAttributes.LLM_REQUEST_FUNCTIONS] = json.dumps(tool_names) - - # Add detailed tool information - for i, tool in enumerate(tools_to_call_from): - attributes.update( - { - MessageAttributes.TOOL_CALL_NAME.format(i=i): tool.name, - MessageAttributes.TOOL_CALL_DESCRIPTION.format(i=i): tool.description, - } - ) - - # Handle response/return value - if return_value is not None: - if hasattr(return_value, "role"): - # ChatMessage object - attributes.update( - { - MessageAttributes.COMPLETION_ROLE.format(i=0): return_value.role, - MessageAttributes.COMPLETION_CONTENT.format(i=0): return_value.content or "", - } - ) - - # Handle tool calls in response - if hasattr(return_value, "tool_calls") and return_value.tool_calls: - for j, tool_call in enumerate(return_value.tool_calls): - attributes.update( - { - MessageAttributes.COMPLETION_TOOL_CALL_ID.format(i=0, j=j): tool_call.id, - MessageAttributes.COMPLETION_TOOL_CALL_TYPE.format(i=0, j=j): tool_call.type, - MessageAttributes.COMPLETION_TOOL_CALL_NAME.format(i=0, j=j): tool_call.function.name, - MessageAttributes.COMPLETION_TOOL_CALL_ARGUMENTS.format(i=0, j=j): json.dumps( - tool_call.function.arguments - ), - } - ) - - # Token usage - if hasattr(return_value, "token_usage") and return_value.token_usage: - attributes.update( - { - SpanAttributes.LLM_USAGE_PROMPT_TOKENS: return_value.token_usage.input_tokens, - SpanAttributes.LLM_USAGE_COMPLETION_TOKENS: return_value.token_usage.output_tokens, - SpanAttributes.LLM_USAGE_TOTAL_TOKENS: ( - return_value.token_usage.input_tokens + return_value.token_usage.output_tokens - ), - } - ) - - # Response ID - if hasattr(return_value, "raw") and return_value.raw and hasattr(return_value.raw, "id"): - attributes[SpanAttributes.LLM_RESPONSE_ID] = return_value.raw.id - - elif isinstance(return_value, 
dict): - # Handle dict response - attributes[MessageAttributes.COMPLETION_CONTENT.format(i=0)] = json.dumps(return_value) + try: + # Extract model name from various sources + model_name = "unknown" + + # Try to get from kwargs + if kwargs: + if "model" in kwargs: + model_name = kwargs["model"] + elif kwargs.get("self") and hasattr(kwargs["self"], "model_id"): + model_name = kwargs["self"].model_id + + # Try to get from args (instance is usually first arg in methods) + if model_name == "unknown" and args and len(args) > 0: + instance = args[0] + if hasattr(instance, "model_id"): + model_name = instance.model_id + + # Set model attributes + attributes[SpanAttributes.LLM_REQUEST_MODEL] = model_name + + # Extract messages from kwargs + if kwargs and "messages" in kwargs: + messages = kwargs["messages"] + if isinstance(messages, list): + for i, message in enumerate(messages): + message_dict = message + if hasattr(message, "to_dict"): + message_dict = message.to_dict() + elif hasattr(message, "__dict__"): + message_dict = message.__dict__ + + if isinstance(message_dict, dict): + # Set role + role = message_dict.get("role", "user") + attributes[MessageAttributes.PROMPT_ROLE.format(i=i)] = role + + # Set content + content = message_dict.get("content", "") + if content: + attributes[MessageAttributes.PROMPT_CONTENT.format(i=i)] = str(content) + + # Extract tools from kwargs + if kwargs and "tools_to_call_from" in kwargs: + tools = kwargs["tools_to_call_from"] + if tools and isinstance(tools, list): + for i, tool in enumerate(tools): + tool_name = getattr(tool, "name", "unknown") + tool_description = getattr(tool, "description", "") + + attributes[MessageAttributes.TOOL_CALL_NAME.format(i=i)] = tool_name + if tool_description: + attributes[MessageAttributes.TOOL_CALL_DESCRIPTION.format(i=i)] = tool_description + + # Extract additional parameters + if kwargs: + if "temperature" in kwargs: + attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] = kwargs["temperature"] + if 
"max_tokens" in kwargs: + attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] = kwargs["max_tokens"] + if "stop_sequences" in kwargs: + attributes[SpanAttributes.LLM_REQUEST_STOP_SEQUENCES] = json.dumps(kwargs["stop_sequences"]) + + # Extract response attributes + if return_value: + try: + # Handle ChatMessage response + if hasattr(return_value, "content"): + attributes[MessageAttributes.COMPLETION_CONTENT.format(i=0)] = str(return_value.content) + if hasattr(return_value, "role"): + attributes[MessageAttributes.COMPLETION_ROLE.format(i=0)] = return_value.role + + # Handle tool calls in response + if hasattr(return_value, "tool_calls") and return_value.tool_calls: + for j, tool_call in enumerate(return_value.tool_calls): + if hasattr(tool_call, "function"): + attributes[ + MessageAttributes.COMPLETION_TOOL_CALL_NAME.format(i=0, j=j) + ] = tool_call.function.name + if hasattr(tool_call.function, "arguments"): + attributes[ + MessageAttributes.COMPLETION_TOOL_CALL_ARGUMENTS.format(i=0, j=j) + ] = tool_call.function.arguments + if hasattr(tool_call, "id"): + attributes[MessageAttributes.COMPLETION_TOOL_CALL_ID.format(i=0, j=j)] = tool_call.id + + # Extract token usage + if hasattr(return_value, "token_usage") and return_value.token_usage: + token_usage = return_value.token_usage + if hasattr(token_usage, "input_tokens"): + attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = token_usage.input_tokens + if hasattr(token_usage, "output_tokens"): + attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = token_usage.output_tokens + if hasattr(token_usage, "total_tokens"): + attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = token_usage.total_tokens + + # Extract response ID + if hasattr(return_value, "raw") and return_value.raw: + raw_response = return_value.raw + if hasattr(raw_response, "id"): + attributes[SpanAttributes.LLM_RESPONSE_ID] = raw_response.id + + except Exception: + # If we can't extract response attributes, continue with what we have + pass + + except 
Exception: + # If extraction fails, return basic attributes + pass return attributes @@ -162,69 +144,62 @@ def get_stream_attributes( kwargs: Optional[Dict] = None, return_value: Optional[Any] = None, ) -> Dict[str, Any]: - """Extract attributes from a streaming model response. + """Extract attributes from a streaming model generation call. Args: args: Optional tuple of positional arguments kwargs: Optional dict of keyword arguments - return_value: Optional return value from the wrapped function + return_value: Optional return value from the function Returns: - Dict containing extracted attributes + Dictionary of extracted attributes """ attributes = get_common_attributes() - # Extract model info from instance - if args and len(args) > 0: - instance = args[0] - model_id = getattr(instance, "model_id", "unknown") - - attributes.update( - { - SpanAttributes.LLM_REQUEST_MODEL: model_id, - SpanAttributes.LLM_SYSTEM: instance.__class__.__name__, - SpanAttributes.LLM_REQUEST_STREAMING: True, - } - ) - - # Extract messages from args/kwargs - messages = None - if args and len(args) > 1: - messages = args[1] - elif kwargs and "messages" in kwargs: - messages = kwargs["messages"] - - if messages: - # Process prompt messages (same as non-streaming) - for i, msg in enumerate(messages): - if isinstance(msg, dict): - role = msg.get("role", "unknown") - content = msg.get("content", "") - - # Handle content that might be a list - if isinstance(content, list): - text_content = "" - for item in content: - if isinstance(item, dict) and item.get("type") == "text": - text_content += item.get("text", "") - content = text_content - - attributes.update( - { - MessageAttributes.PROMPT_ROLE.format(i=i): role, - MessageAttributes.PROMPT_CONTENT.format(i=i): content, - MessageAttributes.PROMPT_TYPE.format(i=i): "text", - } - ) - - # Extract streaming-specific parameters - if kwargs: - stop_sequences = kwargs.get("stop_sequences") - if stop_sequences: - 
attributes[SpanAttributes.LLM_REQUEST_STOP_SEQUENCES] = json.dumps(stop_sequences) - - # Note: For streaming, the return_value is typically a generator - # Individual chunks would need to be tracked separately - attributes["llm.response.is_stream"] = True + try: + # Extract model name + model_name = "unknown" + if kwargs and kwargs.get("self") and hasattr(kwargs["self"], "model_id"): + model_name = kwargs["self"].model_id + elif args and len(args) > 0 and hasattr(args[0], "model_id"): + model_name = args[0].model_id + + attributes[SpanAttributes.LLM_REQUEST_MODEL] = model_name + attributes["gen_ai.request.streaming"] = True + + # Extract messages for streaming + if kwargs and "messages" in kwargs: + messages = kwargs["messages"] + if isinstance(messages, list): + for i, message in enumerate(messages): + message_dict = message + if hasattr(message, "to_dict"): + message_dict = message.to_dict() + elif hasattr(message, "__dict__"): + message_dict = message.__dict__ + + if isinstance(message_dict, dict): + role = message_dict.get("role", "user") + attributes[MessageAttributes.PROMPT_ROLE.format(i=i)] = role + + content = message_dict.get("content", "") + if content: + attributes[MessageAttributes.PROMPT_CONTENT.format(i=i)] = str(content) + + # Extract tools for streaming + if kwargs and "tools_to_call_from" in kwargs: + tools = kwargs["tools_to_call_from"] + if tools and isinstance(tools, list): + for i, tool in enumerate(tools): + tool_name = getattr(tool, "name", "unknown") + tool_description = getattr(tool, "description", "") + + attributes[MessageAttributes.TOOL_CALL_NAME.format(i=i)] = tool_name + if tool_description: + attributes[MessageAttributes.TOOL_CALL_DESCRIPTION.format(i=i)] = tool_description + + except Exception: + # If extraction fails, return basic attributes + pass return attributes diff --git a/agentops/instrumentation/smolagents/instrumentor.py b/agentops/instrumentation/smolagents/instrumentor.py index b0930fe35..37b45b750 100644 --- 
a/agentops/instrumentation/smolagents/instrumentor.py +++ b/agentops/instrumentation/smolagents/instrumentor.py @@ -3,80 +3,13 @@ from typing import Collection from opentelemetry.instrumentation.instrumentor import BaseInstrumentor from opentelemetry.trace import get_tracer, SpanKind +from wrapt import wrap_function_wrapper -from agentops.instrumentation.common.wrappers import WrapConfig, wrap, unwrap +from agentops.instrumentation.common.wrappers import unwrap # Define LIBRARY_VERSION directly to avoid circular import LIBRARY_VERSION = "1.16.0" -# Skip short-duration or redundant spans -SKIP_SHORT_DURATION_SPANS = True -MIN_SPAN_DURATION_MS = 100 # Skip spans shorter than 100ms - - -# Dynamic span naming functions -def get_agent_span_name(args, kwargs, instance=None): - """Generate dynamic span name for agent operations.""" - if not instance and args and len(args) > 0: - instance = args[0] - - if instance: - agent_type = instance.__class__.__name__.replace("Agent", "").lower() - task = kwargs.get("task", "") if kwargs else "" - if task and len(task) > 50: - task = task[:50] + "..." 
- if task: - return f"agent.{agent_type}({task})" - return f"agent.{agent_type}.run" - return "agent.run" - - -def get_llm_span_name(args, kwargs, instance=None): - """Generate dynamic span name for LLM operations with model name.""" - if not instance and args and len(args) > 0: - instance = args[0] - - model_name = "unknown" - if instance: - if hasattr(instance, "model_id"): - model_name = instance.model_id - elif hasattr(instance, "__class__"): - model_name = instance.__class__.__name__ - - # Clean up model name for display - if model_name.startswith("openai/"): - model_name = model_name[7:] # Remove 'openai/' prefix - elif "/" in model_name: - model_name = model_name.split("/")[-1] # Take last part of path - - if kwargs and "stream" in kwargs and kwargs["stream"]: - return f"llm.generate_stream({model_name})" - return f"llm.generate({model_name})" - - -def get_tool_span_name(args, kwargs, instance=None): - """Generate dynamic span name for tool operations.""" - if not instance and args and len(args) > 0: - instance = args[0] - - tool_name = "unknown" - if instance: - if hasattr(instance, "name"): - tool_name = instance.name - elif hasattr(instance, "__class__"): - tool_name = instance.__class__.__name__ - - # If there's a tool_call argument, extract the tool name - if kwargs: - tool_call = kwargs.get("tool_call") - if tool_call and hasattr(tool_call, "name"): - tool_name = tool_call.name - elif tool_call and hasattr(tool_call, "function") and hasattr(tool_call.function, "name"): - tool_name = tool_call.function.name - - return f"tool.{tool_name}" - - # Import attribute handlers try: from agentops.instrumentation.smolagents.attributes.agent import ( @@ -89,13 +22,10 @@ def get_tool_span_name(args, kwargs, instance=None): ) from agentops.instrumentation.smolagents.attributes.model import ( get_model_attributes, - get_model_stream_attributes, + get_stream_attributes, ) - from agentops.instrumentation.smolagents.stream_wrapper import SmoLAgentsStreamWrapper -except 
ImportError as e: - print(f"🖇 AgentOps: Error importing smolagents attributes: {e}") - - # Fallback functions +except ImportError: + # Fallback functions if imports fail def get_agent_attributes(*args, **kwargs): return {} @@ -117,155 +47,196 @@ def get_managed_agent_attributes(*args, **kwargs): def get_model_attributes(*args, **kwargs): return {} - def get_model_stream_attributes(*args, **kwargs): + def get_stream_attributes(*args, **kwargs): return {} - class SmoLAgentsStreamWrapper: - def __init__(self, *args, **kwargs): - pass - -class SmoLAgentsInstrumentor(BaseInstrumentor): - """An instrumentor for SmoLAgents.""" +class SmolAgentsInstrumentor(BaseInstrumentor): + """Instrumentor for SmoLAgents library.""" def instrumentation_dependencies(self) -> Collection[str]: - """Get instrumentation dependencies. - - Returns: - Collection of package names requiring instrumentation - """ - return [] - - def _instrument(self, **kwargs): - """Instrument SmoLAgents library.""" - try: - import smolagents # noqa: F401 - except ImportError: - print("🖇 AgentOps: SmoLAgents not found - skipping instrumentation") - return - - tracer = get_tracer(__name__, LIBRARY_VERSION) - - # ========================= - # Core agent instrumentation with improved naming - # ========================= - - # Instrument main agent run method - primary agent execution spans - wrap( - WrapConfig( - trace_name=get_agent_span_name, - package="smolagents.agents", - class_name="MultiStepAgent", - method_name="run", - handler=get_agent_attributes, - span_kind=SpanKind.INTERNAL, - ), - tracer=tracer, + return ( + "smolagents >= 1.0.0", + "litellm", ) - # Skip redundant agent.run_stream spans (they're typically very short) - # Only instrument if not already covered by .run method - - # ========================= - # Tool execution instrumentation with better naming - # ========================= - - # Primary tool execution spans with descriptive names - wrap( - WrapConfig( - trace_name=get_tool_span_name, - 
package="smolagents.agents", - class_name="ToolCallingAgent", - method_name="execute_tool_call", - handler=get_tool_call_attributes, - span_kind=SpanKind.CLIENT, - ), - tracer=tracer, - ) + def _instrument(self, **kwargs): + """Instrument SmoLAgents with AgentOps telemetry.""" + tracer_provider = kwargs.get("tracer_provider") + tracer = get_tracer(__name__, LIBRARY_VERSION, tracer_provider) - # Skip redundant tool.execute spans (they add minimal value over execute_tool_call) - # The Tool.__call__ method creates very short spans that just wrap the actual execution - - # ========================= - # LLM instrumentation with model names - # ========================= - - # Primary LLM generation spans with model names in span title - wrap( - WrapConfig( - trace_name=get_llm_span_name, - package="smolagents.models", - class_name="LiteLLMModel", - method_name="generate", - handler=get_model_attributes, - span_kind=SpanKind.CLIENT, - ), - tracer=tracer, - ) + # Core agent operations + wrap_function_wrapper("smolagents.agents", "CodeAgent.run", self._agent_run_wrapper(tracer)) - # LLM streaming with model names - wrap( - WrapConfig( - trace_name=get_llm_span_name, - package="smolagents.models", - class_name="LiteLLMModel", - method_name="generate_stream", - handler=get_model_stream_attributes, - span_kind=SpanKind.CLIENT, - ), - tracer=tracer, - ) + wrap_function_wrapper("smolagents.agents", "ToolCallingAgent.run", self._agent_run_wrapper(tracer)) - # ========================= - # Agent step instrumentation (selective) - # ========================= - - # Only instrument step execution if it provides meaningful context - # Skip very short step_stream spans that just wrap other operations - wrap( - WrapConfig( - trace_name=lambda args, kwargs: f"agent.step_{kwargs.get('step_number', 'unknown')}", - package="smolagents.agents", - class_name="MultiStepAgent", - method_name="step", - handler=get_agent_step_attributes, - span_kind=SpanKind.INTERNAL, - ), - tracer=tracer, + # 
Tool calling operations + wrap_function_wrapper( + "smolagents.agents", "ToolCallingAgent.execute_tool_call", self._tool_execution_wrapper(tracer) ) - # ========================= - # Managed agent instrumentation - # ========================= - - # For multi-agent workflows - wrap( - WrapConfig( - trace_name=lambda args, kwargs: f"agent.managed_call({kwargs.get('agent_name', 'unknown')})", - package="smolagents.agents", - class_name="MultiStepAgent", - method_name="managed_call", - handler=get_managed_agent_attributes, - span_kind=SpanKind.INTERNAL, - ), - tracer=tracer, - ) - - # Note: Removed memory instrumentation due to class structure differences - # in smolagents.memory module + # Model operations with proper model name extraction + wrap_function_wrapper("smolagents.models", "LiteLLMModel.generate", self._llm_wrapper(tracer)) + + wrap_function_wrapper("smolagents.models", "LiteLLMModel.generate_stream", self._llm_wrapper(tracer)) + + def _agent_run_wrapper(self, tracer): + """Wrapper for agent run methods.""" + + def wrapper(wrapped, instance, args, kwargs): + # Get proper agent name - handle None case + agent_name = getattr(instance, "name", None) + if not agent_name: # Handle None, empty string, or missing attribute + agent_name = instance.__class__.__name__ + + span_name = f"{agent_name}.run" + + with tracer.start_as_current_span( + span_name, + kind=SpanKind.CLIENT, + ) as span: + # Extract attributes + attributes = get_agent_attributes(args=(instance,) + args, kwargs=kwargs) + + # Fix managed agents attribute + if hasattr(instance, "managed_agents") and instance.managed_agents: + managed_agent_names = [] + for agent in instance.managed_agents: + name = getattr(agent, "name", None) + if not name: # Handle None case for managed agents too + name = agent.__class__.__name__ + managed_agent_names.append(name) + attributes["agent.managed_agents"] = str(managed_agent_names) + else: + attributes["agent.managed_agents"] = "[]" + + for key, value in 
attributes.items(): + if value is not None: + span.set_attribute(key, value) + + try: + result = wrapped(*args, **kwargs) + + # Set output attribute + if result is not None: + span.set_attribute("agentops.entity.output", str(result)) + + return result + except Exception as e: + span.record_exception(e) + raise + + return wrapper + + def _tool_execution_wrapper(self, tracer): + """Wrapper for tool execution methods.""" + + def wrapper(wrapped, instance, args, kwargs): + # Extract tool name for better span naming + tool_name = "unknown" + if args and len(args) > 0: + tool_call = args[0] + if hasattr(tool_call, "function"): + tool_name = tool_call.function.name + + span_name = f"tool.{tool_name}" if tool_name != "unknown" else "tool.execute" + + with tracer.start_as_current_span( + span_name, + kind=SpanKind.CLIENT, + ) as span: + # Extract tool information from kwargs or args + tool_params = "{}" + + # Try to extract tool call information + if args and len(args) > 0: + tool_call = args[0] + if hasattr(tool_call, "function"): + if hasattr(tool_call.function, "arguments"): + tool_params = str(tool_call.function.arguments) + + # Extract attributes + attributes = get_tool_call_attributes(args=(instance,) + args, kwargs=kwargs) + + # Override with better tool information if available + if tool_name != "unknown": + attributes["tool.name"] = tool_name + attributes["tool.parameters"] = tool_params + + for key, value in attributes.items(): + if value is not None: + span.set_attribute(key, value) + + try: + result = wrapped(*args, **kwargs) + + # Set success status and result + span.set_attribute("tool.status", "success") + if result is not None: + span.set_attribute("tool.result", str(result)) + + return result + except Exception as e: + span.set_attribute("tool.status", "error") + span.record_exception(e) + raise + + return wrapper + + def _llm_wrapper(self, tracer): + """Wrapper for LLM generation methods with proper model name extraction.""" + + def wrapper(wrapped, 
instance, args, kwargs): + # Extract model name from instance + model_name = getattr(instance, "model_id", "unknown") + + # Determine if this is streaming + is_streaming = "generate_stream" in wrapped.__name__ + operation = "generate_stream" if is_streaming else "generate" + span_name = f"litellm.{operation} ({model_name})" if model_name != "unknown" else f"litellm.{operation}" + + with tracer.start_as_current_span( + span_name, + kind=SpanKind.CLIENT, + ) as span: + # Extract attributes + if is_streaming: + attributes = get_stream_attributes(args=(instance,) + args, kwargs=kwargs) + else: + attributes = get_model_attributes(args=(instance,) + args, kwargs=kwargs) + + # Ensure model name is properly set + attributes["gen_ai.request.model"] = model_name + + for key, value in attributes.items(): + if value is not None: + span.set_attribute(key, value) + + try: + result = wrapped(*args, **kwargs) + + # Extract response attributes if available + if result and hasattr(result, "content"): + span.set_attribute("gen_ai.completion.0.content", str(result.content)) + if result and hasattr(result, "token_usage"): + token_usage = result.token_usage + if hasattr(token_usage, "input_tokens"): + span.set_attribute("gen_ai.usage.prompt_tokens", token_usage.input_tokens) + if hasattr(token_usage, "output_tokens"): + span.set_attribute("gen_ai.usage.completion_tokens", token_usage.output_tokens) + + return result + except Exception as e: + span.record_exception(e) + raise + + return wrapper def _uninstrument(self, **kwargs): - """Uninstrument SmoLAgents. 
- - Args: - **kwargs: Uninstrumentation options - """ - # Uninstrument agent methods - unwrap("smolagents.agents", "MultiStepAgent.run") - unwrap("smolagents.agents", "MultiStepAgent._generate_planning_step") + """Remove instrumentation.""" + # Unwrap all instrumented methods + unwrap("smolagents.agents", "CodeAgent.run") + unwrap("smolagents.agents", "ToolCallingAgent.run") unwrap("smolagents.agents", "ToolCallingAgent.execute_tool_call") - unwrap("smolagents.agents", "MultiStepAgent.__call__") - - # Uninstrument model methods unwrap("smolagents.models", "LiteLLMModel.generate") unwrap("smolagents.models", "LiteLLMModel.generate_stream") From 17d821a6585216dd87b4e9da077424548826af85 Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Thu, 12 Jun 2025 21:27:05 +0530 Subject: [PATCH 07/13] feat: Add multi-agent system notebook example --- .../multi_smolagents.ipynb | 252 ++++++++++++++++++ 1 file changed, 252 insertions(+) create mode 100644 examples/smolagents_examples/multi_smolagents.ipynb diff --git a/examples/smolagents_examples/multi_smolagents.ipynb b/examples/smolagents_examples/multi_smolagents.ipynb new file mode 100644 index 000000000..b8689f48d --- /dev/null +++ b/examples/smolagents_examples/multi_smolagents.ipynb @@ -0,0 +1,252 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "7d4c41ff", + "metadata": {}, + "source": [ + "# Orchestrate a Multi-Agent System\n", + "\n", + "In this notebook, we will make a multi-agent web browser: an agentic system with several agents collaborating to solve problems using the web!\n", + "\n", + "It will be a simple hierarchy, using a `ManagedAgent` object to wrap the managed web search agent:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "015b0a87", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install markdownify\n", + "%pip install duckduckgo-search\n", + "%pip install smolagents\n", + "%pip install agentops" + ] + }, + { + "cell_type": "markdown", + "id": "00509499", + 
"metadata": {}, + "source": [ + "🖇️ Now we initialize the AgentOps client and load the environment variables to use the API keys." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "330770fd", + "metadata": {}, + "outputs": [], + "source": [ + "import agentops\n", + "from dotenv import load_dotenv\n", + "import os\n", + "\n", + "load_dotenv()\n", + "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\"\n", + "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"" + ] + }, + { + "cell_type": "markdown", + "id": "9516d2a7", + "metadata": {}, + "source": [ + "⚡️ Our agent will be powered by `openai/gpt-4o-mini` using the `LiteLLMModel` class." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5f78927c", + "metadata": {}, + "outputs": [], + "source": [ + "from smolagents import LiteLLMModel\n", + "\n", + "agentops.init(api_key=AGENTOPS_API_KEY, default_tags=[\"smolagents\", \"example\", \"multi-agent\"])\n", + "model = LiteLLMModel(\"openai/gpt-4o-mini\")" + ] + }, + { + "cell_type": "markdown", + "id": "a08cc376", + "metadata": {}, + "source": [ + "## Create a Web Search Tool\n", + "\n", + "For web browsing, we can already use our pre-existing `DuckDuckGoSearchTool`. However, we will also create a `VisitWebpageTool` from scratch using `markdownify`. 
Here’s how:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "01689447", + "metadata": {}, + "outputs": [], + "source": [ + "import re\n", + "import requests\n", + "from markdownify import markdownify\n", + "from requests.exceptions import RequestException\n", + "from smolagents import tool\n", + "\n", + "\n", + "@tool\n", + "def visit_webpage(url: str) -> str:\n", + " \"\"\"Visits a webpage at the given URL and returns its content as a markdown string.\n", + "\n", + " Args:\n", + " url: The URL of the webpage to visit.\n", + "\n", + " Returns:\n", + " The content of the webpage converted to Markdown, or an error message if the request fails.\n", + " \"\"\"\n", + " try:\n", + " # Send a GET request to the URL\n", + " response = requests.get(url)\n", + " response.raise_for_status() # Raise an exception for bad status codes\n", + "\n", + " # Convert the HTML content to Markdown\n", + " markdown_content = markdownify(response.text).strip()\n", + "\n", + " # Remove multiple line breaks\n", + " markdown_content = re.sub(r\"\\n{3,}\", \"\\n\\n\", markdown_content)\n", + "\n", + " return markdown_content\n", + "\n", + " except RequestException as e:\n", + " return f\"Error fetching the webpage: {str(e)}\"\n", + " except Exception as e:\n", + " return f\"An unexpected error occurred: {str(e)}\"" + ] + }, + { + "cell_type": "markdown", + "id": "3c45517b", + "metadata": {}, + "source": [ + "Let’s test our tool:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "51cc54f1", + "metadata": {}, + "outputs": [], + "source": [ + "print(visit_webpage(\"https://en.wikipedia.org/wiki/Hugging_Face\")[:500])" + ] + }, + { + "cell_type": "markdown", + "id": "921df68d", + "metadata": {}, + "source": [ + "## Build Our Multi-Agent System\n", + "\n", + "We will now use the tools `search` and `visit_webpage` to create the web agent." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "f274b34f", + "metadata": {}, + "outputs": [], + "source": [ + "from smolagents import (\n", + " CodeAgent,\n", + " ToolCallingAgent,\n", + " DuckDuckGoSearchTool,\n", + ")\n", + "\n", + "web_agent = ToolCallingAgent(\n", + " tools=[DuckDuckGoSearchTool(), visit_webpage],\n", + " model=model,\n", + " name=\"search\",\n", + " description=\"Runs web searches for you. Give it your query as an argument.\",\n", + ")\n", + "\n", + "manager_agent = CodeAgent(\n", + " tools=[],\n", + " model=model,\n", + " managed_agents=[web_agent],\n", + " additional_authorized_imports=[\"time\", \"numpy\", \"pandas\"],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d5977883", + "metadata": {}, + "source": [ + "Let’s run our system with the following query:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e1e497c1", + "metadata": {}, + "outputs": [], + "source": [ + "answer = manager_agent.run(\n", + " \"If LLM trainings continue to scale up at the current rhythm until 2030, what would be the electric power in GW required to power the biggest training runs by 2030? What does that correspond to, compared to some countries? Please provide a source for any number used.\"\n", + ")\n", + "\n", + "print(answer)" + ] + }, + { + "cell_type": "markdown", + "id": "169583c6", + "metadata": {}, + "source": [ + "Awesome! We've successfully run a multi-agent system. Let's end the agentops session with a \"Success\" state. You can also end the session with a \"Failure\" or \"Indeterminate\" state, which is set as default." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f82fafac", + "metadata": {}, + "outputs": [], + "source": [ + "agentops.end_session(\"Success\")" + ] + }, + { + "cell_type": "markdown", + "id": "d373e4ea", + "metadata": {}, + "source": [ + "You can view the session in the [AgentOps dashboard](https://app.agentops.ai/sessions) by clicking the link provided after ending the session." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "test", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.8" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From ac323f41d3726f4435d40e03489d14398380dc36 Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Thu, 12 Jun 2025 21:34:57 +0530 Subject: [PATCH 08/13] fix: Update execution counts and correct session ending method in multi-agent notebook --- .../smolagents_examples/multi_smolagents.ipynb | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/examples/smolagents_examples/multi_smolagents.ipynb b/examples/smolagents_examples/multi_smolagents.ipynb index b8689f48d..72710d0a5 100644 --- a/examples/smolagents_examples/multi_smolagents.ipynb +++ b/examples/smolagents_examples/multi_smolagents.ipynb @@ -35,7 +35,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "330770fd", "metadata": {}, "outputs": [], @@ -66,7 +66,8 @@ "source": [ "from smolagents import LiteLLMModel\n", "\n", - "agentops.init(api_key=AGENTOPS_API_KEY, default_tags=[\"smolagents\", \"example\", \"multi-agent\"])\n", + "agentops.init(api_key=AGENTOPS_API_KEY, default_tags=[\"smolagents\", \"example\", \"multi-agent\"], auto_start_session=False)\n", + "agentops.start_trace(\"SmolAgents Example\")\n", "model = 
LiteLLMModel(\"openai/gpt-4o-mini\")" ] }, @@ -82,7 +83,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "01689447", "metadata": {}, "outputs": [], @@ -153,7 +154,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "f274b34f", "metadata": {}, "outputs": [], @@ -216,7 +217,7 @@ "metadata": {}, "outputs": [], "source": [ - "agentops.end_session(\"Success\")" + "agentops.end_trace(\"Success\")" ] }, { @@ -230,7 +231,7 @@ ], "metadata": { "kernelspec": { - "display_name": "test", + "display_name": ".venv", "language": "python", "name": "python3" }, @@ -244,7 +245,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.8" + "version": "3.11.12" } }, "nbformat": 4, From eb0eb7ea3a4240fade6b926eb5e833b5c978079f Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Thu, 12 Jun 2025 21:50:24 +0530 Subject: [PATCH 09/13] fix: Correct markdown formatting and remove deprecated multi-agent example notebook --- .../smolagents/multi_smolagents_system.ipynb | 95 +++++-- .../smolagents/multi_smolagents_system.py | 10 +- .../multi_smolagents.ipynb | 253 ------------------ 3 files changed, 79 insertions(+), 279 deletions(-) delete mode 100644 examples/smolagents_examples/multi_smolagents.ipynb diff --git a/examples/smolagents/multi_smolagents_system.ipynb b/examples/smolagents/multi_smolagents_system.ipynb index 17d0bdd48..9a97f154e 100644 --- a/examples/smolagents/multi_smolagents_system.ipynb +++ b/examples/smolagents/multi_smolagents_system.ipynb @@ -34,7 +34,7 @@ " | Visit webpage tool |\n", " +--------------------------------+\n", "```\n", - "Let\u2019s set up this system.\n", + "Let’s set up this system.\n", "\n", "Run the line below to install the required dependencies:" ] @@ -57,32 +57,51 @@ "id": "00509499", "metadata": {}, "source": [ - "\ud83d\udd87\ufe0f Now we initialize the AgentOps client and load the environment variables to use the API keys." 
+ "🖇️ Now we initialize the AgentOps client and load the environment variables to use the API keys." ] }, { "cell_type": "code", + "execution_count": null, "id": "330770fd", "metadata": {}, "outputs": [], - "source": "import agentops\nfrom dotenv import load_dotenv\nimport os\nimport re\nimport requests\nfrom markdownify import markdownify\nfrom requests.exceptions import RequestException\nfrom smolagents import tool\nfrom smolagents import LiteLLMModel\nfrom smolagents import (\n CodeAgent,\n ToolCallingAgent,\n ManagedAgent,\n DuckDuckGoSearchTool,\n)\n\nload_dotenv()\nos.environ[\"AGENTOPS_API_KEY\"] = os.getenv(\"AGENTOPS_API_KEY\", \"your_api_key_here\")\nos.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\", \"your_openai_api_key_here\")", - "execution_count": null + "source": [ + "import agentops\n", + "from dotenv import load_dotenv\n", + "import os\n", + "import re\n", + "import requests\n", + "from markdownify import markdownify\n", + "from requests.exceptions import RequestException\n", + "\n", + "load_dotenv()\n", + "os.environ[\"AGENTOPS_API_KEY\"] = os.getenv(\"AGENTOPS_API_KEY\", \"your_api_key_here\")\n", + "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\", \"your_openai_api_key_here\")" + ] }, { "cell_type": "markdown", "id": "9516d2a7", "metadata": {}, "source": [ - "\u26a1\ufe0f Our agent will be powered by `openai/gpt-4o-mini` using the `LiteLLMModel` class." + "⚡️ Our agent will be powered by `openai/gpt-4o-mini` using the `LiteLLMModel` class." 
] }, { "cell_type": "code", + "execution_count": null, "id": "5f78927c", "metadata": {}, "outputs": [], - "source": "agentops.init(auto_start_session=False)\ntracer = agentops.start_trace(\n trace_name=\"Orchestrate a Multi-Agent System\", tags=[\"smolagents\", \"example\", \"multi-agent\", \"agentops-example\"]\n)\nmodel = LiteLLMModel(\"openai/gpt-4o-mini\")", - "execution_count": null + "source": [ + "agentops.init(auto_start_session=False)\n", + "tracer = agentops.start_trace(\n", + " trace_name=\"Orchestrate a Multi-Agent System\", tags=[\"smolagents\", \"example\", \"multi-agent\", \"agentops-example\"]\n", + ")\n", + "from smolagents import LiteLLMModel, tool ,CodeAgent, ToolCallingAgent, DuckDuckGoSearchTool\n", + "model = LiteLLMModel(\"openai/gpt-4o-mini\")" + ] }, { "cell_type": "markdown", @@ -91,23 +110,51 @@ "source": [ "## Create a Web Search Tool\n", "\n", - "For web browsing, we can already use our pre-existing `DuckDuckGoSearchTool`. However, we will also create a `VisitWebpageTool` from scratch using `markdownify`. Here\u2019s how:" + "For web browsing, we can already use our pre-existing `DuckDuckGoSearchTool`. However, we will also create a `VisitWebpageTool` from scratch using `markdownify`. 
Here’s how:" ] }, { "cell_type": "code", + "execution_count": null, "id": "01689447", "metadata": {}, "outputs": [], - "source": "@tool\ndef visit_webpage(url: str) -> str:\n \"\"\"Visits a webpage at the given URL and returns its content as a markdown string.\n\n Args:\n url: The URL of the webpage to visit.\n\n Returns:\n The content of the webpage converted to Markdown, or an error message if the request fails.\n \"\"\"\n try:\n # Send a GET request to the URL\n response = requests.get(url)\n response.raise_for_status() # Raise an exception for bad status codes\n\n # Convert the HTML content to Markdown\n markdown_content = markdownify(response.text).strip()\n\n # Remove multiple line breaks\n markdown_content = re.sub(r\"\\n{3,}\", \"\\n\\n\", markdown_content)\n\n return markdown_content\n\n except RequestException as e:\n return f\"Error fetching the webpage: {str(e)}\"\n except Exception as e:\n return f\"An unexpected error occurred: {str(e)}\"", - "execution_count": null + "source": [ + "@tool\n", + "def visit_webpage(url: str) -> str:\n", + " \"\"\"Visits a webpage at the given URL and returns its content as a markdown string.\n", + "\n", + " Args:\n", + " url: The URL of the webpage to visit.\n", + "\n", + " Returns:\n", + " The content of the webpage converted to Markdown, or an error message if the request fails.\n", + " \"\"\"\n", + " try:\n", + " # Send a GET request to the URL\n", + " response = requests.get(url)\n", + " response.raise_for_status() # Raise an exception for bad status codes\n", + "\n", + " # Convert the HTML content to Markdown\n", + " markdown_content = markdownify(response.text).strip()\n", + "\n", + " # Remove multiple line breaks\n", + " markdown_content = re.sub(r\"\\n{3,}\", \"\\n\\n\", markdown_content)\n", + "\n", + " return markdown_content\n", + "\n", + " except RequestException as e:\n", + " return f\"Error fetching the webpage: {str(e)}\"\n", + " except Exception as e:\n", + " return f\"An unexpected error occurred: 
{str(e)}\"" + ] }, { "cell_type": "markdown", "id": "3c45517b", "metadata": {}, "source": [ - "Let\u2019s test our tool:" + "Let’s test our tool:" ] }, { @@ -132,18 +179,32 @@ }, { "cell_type": "code", + "execution_count": null, "id": "f274b34f", "metadata": {}, "outputs": [], - "source": "web_agent = ToolCallingAgent(\n tools=[DuckDuckGoSearchTool(), visit_webpage],\n model=model,\n max_iterations=10,\n)\n\nmanaged_web_agent = ManagedAgent(\n agent=web_agent,\n name=\"search\",\n description=\"Runs web searches for you. Give it your query as an argument.\",\n)\n\nmanager_agent = CodeAgent(\n tools=[],\n model=model,\n managed_agents=[managed_web_agent],\n additional_authorized_imports=[\"time\", \"numpy\", \"pandas\"],\n)", - "execution_count": null + "source": [ + "web_agent = ToolCallingAgent(\n", + " tools=[DuckDuckGoSearchTool(), visit_webpage],\n", + " model=model,\n", + " name=\"search\",\n", + " description=\"Runs web searches for you. Give it your query as an argument.\",\n", + ")\n", + "\n", + "manager_agent = CodeAgent(\n", + " tools=[],\n", + " model=model,\n", + " managed_agents=[web_agent],\n", + " additional_authorized_imports=[\"time\", \"numpy\", \"pandas\"],\n", + ")" + ] }, { "cell_type": "markdown", "id": "d5977883", "metadata": {}, "source": [ - "Let\u2019s run our system with the following query:" + "Let’s run our system with the following query:" ] }, { @@ -189,7 +250,7 @@ ], "metadata": { "kernelspec": { - "display_name": "test", + "display_name": ".venv", "language": "python", "name": "python3" }, @@ -203,9 +264,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.8" + "version": "3.11.12" } }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/examples/smolagents/multi_smolagents_system.py b/examples/smolagents/multi_smolagents_system.py index b7d0319de..c14740b30 100644 --- a/examples/smolagents/multi_smolagents_system.py +++ 
b/examples/smolagents/multi_smolagents_system.py @@ -40,7 +40,6 @@ from smolagents import ( CodeAgent, ToolCallingAgent, - ManagedAgent, DuckDuckGoSearchTool, ) @@ -96,19 +95,12 @@ def visit_webpage(url: str) -> str: web_agent = ToolCallingAgent( tools=[DuckDuckGoSearchTool(), visit_webpage], model=model, - max_iterations=10, -) - -managed_web_agent = ManagedAgent( - agent=web_agent, - name="search", - description="Runs web searches for you. Give it your query as an argument.", ) manager_agent = CodeAgent( tools=[], model=model, - managed_agents=[managed_web_agent], + managed_agents=[web_agent], additional_authorized_imports=["time", "numpy", "pandas"], ) # Let’s run our system with the following query: diff --git a/examples/smolagents_examples/multi_smolagents.ipynb b/examples/smolagents_examples/multi_smolagents.ipynb deleted file mode 100644 index 72710d0a5..000000000 --- a/examples/smolagents_examples/multi_smolagents.ipynb +++ /dev/null @@ -1,253 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "7d4c41ff", - "metadata": {}, - "source": [ - "# Orchestrate a Multi-Agent System\n", - "\n", - "In this notebook, we will make a multi-agent web browser: an agentic system with several agents collaborating to solve problems using the web!\n", - "\n", - "It will be a simple hierarchy, using a `ManagedAgent` object to wrap the managed web search agent:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "015b0a87", - "metadata": {}, - "outputs": [], - "source": [ - "%pip install markdownify\n", - "%pip install duckduckgo-search\n", - "%pip install smolagents\n", - "%pip install agentops" - ] - }, - { - "cell_type": "markdown", - "id": "00509499", - "metadata": {}, - "source": [ - "🖇️ Now we initialize the AgentOps client and load the environment variables to use the API keys." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "330770fd", - "metadata": {}, - "outputs": [], - "source": [ - "import agentops\n", - "from dotenv import load_dotenv\n", - "import os\n", - "\n", - "load_dotenv()\n", - "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"\"\n", - "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\") or \"\"" - ] - }, - { - "cell_type": "markdown", - "id": "9516d2a7", - "metadata": {}, - "source": [ - "⚡️ Our agent will be powered by `openai/gpt-4o-mini` using the `LiteLLMModel` class." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5f78927c", - "metadata": {}, - "outputs": [], - "source": [ - "from smolagents import LiteLLMModel\n", - "\n", - "agentops.init(api_key=AGENTOPS_API_KEY, default_tags=[\"smolagents\", \"example\", \"multi-agent\"], auto_start_session=False)\n", - "agentops.start_trace(\"SmolAgents Example\")\n", - "model = LiteLLMModel(\"openai/gpt-4o-mini\")" - ] - }, - { - "cell_type": "markdown", - "id": "a08cc376", - "metadata": {}, - "source": [ - "## Create a Web Search Tool\n", - "\n", - "For web browsing, we can already use our pre-existing `DuckDuckGoSearchTool`. However, we will also create a `VisitWebpageTool` from scratch using `markdownify`. 
Here’s how:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "01689447", - "metadata": {}, - "outputs": [], - "source": [ - "import re\n", - "import requests\n", - "from markdownify import markdownify\n", - "from requests.exceptions import RequestException\n", - "from smolagents import tool\n", - "\n", - "\n", - "@tool\n", - "def visit_webpage(url: str) -> str:\n", - " \"\"\"Visits a webpage at the given URL and returns its content as a markdown string.\n", - "\n", - " Args:\n", - " url: The URL of the webpage to visit.\n", - "\n", - " Returns:\n", - " The content of the webpage converted to Markdown, or an error message if the request fails.\n", - " \"\"\"\n", - " try:\n", - " # Send a GET request to the URL\n", - " response = requests.get(url)\n", - " response.raise_for_status() # Raise an exception for bad status codes\n", - "\n", - " # Convert the HTML content to Markdown\n", - " markdown_content = markdownify(response.text).strip()\n", - "\n", - " # Remove multiple line breaks\n", - " markdown_content = re.sub(r\"\\n{3,}\", \"\\n\\n\", markdown_content)\n", - "\n", - " return markdown_content\n", - "\n", - " except RequestException as e:\n", - " return f\"Error fetching the webpage: {str(e)}\"\n", - " except Exception as e:\n", - " return f\"An unexpected error occurred: {str(e)}\"" - ] - }, - { - "cell_type": "markdown", - "id": "3c45517b", - "metadata": {}, - "source": [ - "Let’s test our tool:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "51cc54f1", - "metadata": {}, - "outputs": [], - "source": [ - "print(visit_webpage(\"https://en.wikipedia.org/wiki/Hugging_Face\")[:500])" - ] - }, - { - "cell_type": "markdown", - "id": "921df68d", - "metadata": {}, - "source": [ - "## Build Our Multi-Agent System\n", - "\n", - "We will now use the tools `search` and `visit_webpage` to create the web agent." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f274b34f", - "metadata": {}, - "outputs": [], - "source": [ - "from smolagents import (\n", - " CodeAgent,\n", - " ToolCallingAgent,\n", - " DuckDuckGoSearchTool,\n", - ")\n", - "\n", - "web_agent = ToolCallingAgent(\n", - " tools=[DuckDuckGoSearchTool(), visit_webpage],\n", - " model=model,\n", - " name=\"search\",\n", - " description=\"Runs web searches for you. Give it your query as an argument.\",\n", - ")\n", - "\n", - "manager_agent = CodeAgent(\n", - " tools=[],\n", - " model=model,\n", - " managed_agents=[web_agent],\n", - " additional_authorized_imports=[\"time\", \"numpy\", \"pandas\"],\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "d5977883", - "metadata": {}, - "source": [ - "Let’s run our system with the following query:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e1e497c1", - "metadata": {}, - "outputs": [], - "source": [ - "answer = manager_agent.run(\n", - " \"If LLM trainings continue to scale up at the current rhythm until 2030, what would be the electric power in GW required to power the biggest training runs by 2030? What does that correspond to, compared to some countries? Please provide a source for any number used.\"\n", - ")\n", - "\n", - "print(answer)" - ] - }, - { - "cell_type": "markdown", - "id": "169583c6", - "metadata": {}, - "source": [ - "Awesome! We've successfully run a multi-agent system. Let's end the agentops session with a \"Success\" state. You can also end the session with a \"Failure\" or \"Indeterminate\" state, which is set as default." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f82fafac", - "metadata": {}, - "outputs": [], - "source": [ - "agentops.end_trace(\"Success\")" - ] - }, - { - "cell_type": "markdown", - "id": "d373e4ea", - "metadata": {}, - "source": [ - "You can view the session in the [AgentOps dashboard](https://app.agentops.ai/sessions) by clicking the link provided after ending the session." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.12" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} From ca56c44b40f271ec46ef59a8255e656849ecb03d Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Fri, 13 Jun 2025 02:09:37 +0530 Subject: [PATCH 10/13] feat: Add Smolagents integration documentation and examples --- docs/mint.json | 1 + docs/v2/examples/smolagents.mdx | 145 +++++++++++++++++ docs/v2/integrations/smolagents.mdx | 238 ++++++++++++++++++++++++++++ examples/smolagents/README.md | 64 +++++--- 4 files changed, 428 insertions(+), 20 deletions(-) create mode 100644 docs/v2/examples/smolagents.mdx create mode 100644 docs/v2/integrations/smolagents.mdx diff --git a/docs/mint.json b/docs/mint.json index a6fcf4af1..1cc0c5f39 100644 --- a/docs/mint.json +++ b/docs/mint.json @@ -177,6 +177,7 @@ "v2/integrations/litellm", "v2/integrations/openai", "v2/integrations/agents_sdk", + "v2/integrations/smolagents", "v2/integrations/ibm_watsonx_ai", "v2/integrations/xai" ], diff --git a/docs/v2/examples/smolagents.mdx b/docs/v2/examples/smolagents.mdx new file mode 100644 index 000000000..498a6cc43 --- /dev/null +++ b/docs/v2/examples/smolagents.mdx @@ -0,0 +1,145 @@ +--- +title: 'Smolagents' +description: 'Orchestrate a Multi-Agent System' +--- +{/* 
SOURCE_FILE: examples/smolagents/multi_smolagents_system.ipynb */} + +_View Notebook on Github_ + +# Orchestrate a Multi-Agent System + +In this notebook, we will make a multi-agent web browser: an agentic system with several agents collaborating to solve problems using the web! +Run the line below to install the required dependencies: + + + +## Installation + + ```bash pip + pip install agentops duckduckgo-search markdownify smolagents + ``` + ```bash poetry + poetry add agentops duckduckgo-search markdownify smolagents + ``` + ```bash uv + uv add agentops duckduckgo-search markdownify smolagents + ``` + + +🖇️ Now we initialize the AgentOps client and load the environment variables to use the API keys. + + +```python +import agentops +from dotenv import load_dotenv +import os +import re +import requests +from markdownify import markdownify +from requests.exceptions import RequestException + +load_dotenv() +os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY", "your_api_key_here") +os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "your_openai_api_key_here") +``` + +⚡️ Our agent will be powered by `openai/gpt-4o-mini` using the `LiteLLMModel` class. + + +```python +agentops.init(auto_start_session=False) +tracer = agentops.start_trace( + trace_name="Orchestrate a Multi-Agent System", tags=["smolagents", "example", "multi-agent", "agentops-example"] +) +from smolagents import LiteLLMModel, tool ,CodeAgent, ToolCallingAgent, DuckDuckGoSearchTool +model = LiteLLMModel("openai/gpt-4o-mini") +``` + +## Create a Web Search Tool + +For web browsing, we can already use our pre-existing `DuckDuckGoSearchTool`. However, we will also create a `VisitWebpageTool` from scratch using `markdownify`. Here’s how: + + +```python +@tool +def visit_webpage(url: str) -> str: + """Visits a webpage at the given URL and returns its content as a markdown string. + + Args: + url: The URL of the webpage to visit. 
+ + Returns: + The content of the webpage converted to Markdown, or an error message if the request fails. + """ + try: + # Send a GET request to the URL + response = requests.get(url) + response.raise_for_status() # Raise an exception for bad status codes + + # Convert the HTML content to Markdown + markdown_content = markdownify(response.text).strip() + + # Remove multiple line breaks + markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content) + + return markdown_content + + except RequestException as e: + return f"Error fetching the webpage: {str(e)}" + except Exception as e: + return f"An unexpected error occurred: {str(e)}" +``` + +Let’s test our tool: + + +```python +print(visit_webpage("https://en.wikipedia.org/wiki/Hugging_Face")[:500]) +``` + +## Build Our Multi-Agent System + +We will now use the tools `search` and `visit_webpage` to create the web agent. + + +```python +web_agent = ToolCallingAgent( + tools=[DuckDuckGoSearchTool(), visit_webpage], + model=model, + name="search", + description="Runs web searches for you. Give it your query as an argument.", +) + +manager_agent = CodeAgent( + tools=[], + model=model, + managed_agents=[web_agent], + additional_authorized_imports=["time", "numpy", "pandas"], +) +``` + +Let’s run our system with the following query: + + +```python +answer = manager_agent.run( + "If LLM trainings continue to scale up at the current rhythm until 2030, what would be the electric power in GW required to power the biggest training runs by 2030? What does that correspond to, compared to some countries? Please provide a source for any number used." +) + +print(answer) +``` + +Awesome! We've successfully run a multi-agent system. Let's end the agentops session with a "Success" state. You can also end the session with a "Failure" or "Indeterminate" state, which is set as default. 
+ + +```python +agentops.end_trace(tracer, end_state="Success") +``` + +You can view the session in the [AgentOps dashboard](https://app.agentops.ai/sessions) by clicking the link provided after ending the session. + + + + + + \ No newline at end of file diff --git a/docs/v2/integrations/smolagents.mdx b/docs/v2/integrations/smolagents.mdx new file mode 100644 index 000000000..639b36edc --- /dev/null +++ b/docs/v2/integrations/smolagents.mdx @@ -0,0 +1,238 @@ +--- +title: Smolagents +description: "Track and analyze your Smolagents AI agents with AgentOps" +--- + +AgentOps provides seamless integration with [Smolagents](https://github.com/huggingface/smolagents), HuggingFace's lightweight framework for building AI agents. Monitor your agent workflows, tool usage, and execution traces automatically. + +## Core Concepts + +Smolagents is designed around several key concepts: + +- **Agents**: AI assistants that can use tools and reason through problems +- **Tools**: Functions that agents can call to interact with external systems +- **Models**: LLM backends that power agent reasoning (supports various providers via LiteLLM) +- **Code Execution**: Agents can write and execute Python code in sandboxed environments +- **Multi-Agent Systems**: Orchestrate multiple specialized agents working together + +## Installation + +Install AgentOps and Smolagents, along with any additional dependencies: + + + ```bash pip + pip install agentops smolagents python-dotenv + ``` + ```bash poetry + poetry add agentops smolagents python-dotenv + ``` + ```bash uv + uv add agentops smolagents python-dotenv + ``` + + +## Setting Up API Keys + +Before using Smolagents with AgentOps, you need to set up your API keys: +- **AGENTOPS_API_KEY**: From your [AgentOps Dashboard](https://app.agentops.ai/) +- **LLM API Keys**: Depending on your chosen model provider (e.g., OPENAI_API_KEY, ANTHROPIC_API_KEY) + +Set these as environment variables or in a `.env` file. 
+ + + ```bash Export to CLI + export AGENTOPS_API_KEY="your_agentops_api_key_here" + export OPENAI_API_KEY="your_openai_api_key_here" + ``` + ```txt Set in .env file + AGENTOPS_API_KEY="your_agentops_api_key_here" + OPENAI_API_KEY="your_openai_api_key_here" + ``` + + +Then load them in your Python code: +```python +from dotenv import load_dotenv +import os + +load_dotenv() + +AGENTOPS_API_KEY = os.getenv("AGENTOPS_API_KEY") +OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") +``` + +## Usage + +Initialize AgentOps before creating your Smolagents to automatically track all agent interactions: + +```python +import agentops +from smolagents import LiteLLMModel, ToolCallingAgent, DuckDuckGoSearchTool + +# Initialize AgentOps +agentops.init() + +# Create a model (supports various providers via LiteLLM) +model = LiteLLMModel("openai/gpt-4o-mini") + +# Create an agent with tools +agent = ToolCallingAgent( + tools=[DuckDuckGoSearchTool()], + model=model, +) + +# Run the agent +result = agent.run("What are the latest developments in AI safety research?") +print(result) +``` + +## Examples + + +```python Simple Math Agent +import agentops +from smolagents import LiteLLMModel, CodeAgent + +# Initialize AgentOps +agentops.init() + +# Create a model +model = LiteLLMModel("openai/gpt-4o-mini") + +# Create a code agent that can perform calculations +agent = CodeAgent( + tools=[], # No external tools needed for math + model=model, + additional_authorized_imports=["math", "numpy"], +) + +# Ask the agent to solve a math problem +result = agent.run( + "Calculate the compound interest on $10,000 invested at 5% annual rate " + "for 10 years, compounded monthly. Show your work." 
+)
+
+print(result)
+```
+
+```python Research Agent with Tools
+import agentops
+from smolagents import (
+    LiteLLMModel,
+    ToolCallingAgent,
+    DuckDuckGoSearchTool,
+    tool
+)
+
+# Initialize AgentOps
+agentops.init()
+
+# Create a custom tool
+@tool
+def word_counter(text: str) -> str:
+    """
+    Counts the number of words in a given text.
+
+    Args:
+        text: The text to count words in.
+
+    Returns:
+        A string with the word count.
+    """
+    word_count = len(text.split())
+    return f"The text contains {word_count} words."
+
+# Create model and agent
+model = LiteLLMModel("openai/gpt-4o-mini")
+
+agent = ToolCallingAgent(
+    tools=[DuckDuckGoSearchTool(), word_counter],
+    model=model,
+)
+
+# Run a research task
+result = agent.run(
+    "Search for information about the James Webb Space Telescope's latest discoveries. "
+    "Then count how many words are in your summary."
+)
+
+print(result)
+```
+
+```python Multi-Step Task Agent
+import agentops
+from smolagents import LiteLLMModel, CodeAgent, tool
+import json
+
+# Initialize AgentOps
+agentops.init()
+
+# Create tools for data processing
+@tool
+def save_json(data: dict, filename: str) -> str:
+    """
+    Saves data to a JSON file.
+
+    Args:
+        data: Dictionary to save
+        filename: Name of the file to save to
+
+    Returns:
+        Success message
+    """
+    with open(filename, 'w') as f:
+        json.dump(data, f, indent=2)
+    return f"Data saved to {filename}"
+
+@tool
+def load_json(filename: str) -> dict:
+    """
+    Loads data from a JSON file.
+
+    Args:
+        filename: Name of the file to load from
+
+    Returns:
+        The loaded data as a dictionary
+    """
+    with open(filename, 'r') as f:
+        return json.load(f)
+
+# Create agent
+model = LiteLLMModel("openai/gpt-4o-mini")
+
+agent = CodeAgent(
+    tools=[save_json, load_json],
+    model=model,
+    additional_authorized_imports=["pandas", "datetime"],
+)
+
+# Run a multi-step data processing task
+result = agent.run("""
+1. Create a dataset of 5 fictional employees with names, departments, and salaries
+2. 
Save this data to 'employees.json' +3. Load the data back and calculate the average salary +4. Find the highest paid employee +5. Return a summary of your findings +""") + +print(result) +``` + +## More Examples + + + + Complex multi-agent web browsing system + + + Convert natural language queries to SQL + + + +Visit your [AgentOps Dashboard](https://app.agentops.ai) to see detailed traces of your Smolagents executions, tool usage, and agent reasoning steps. + + + + + \ No newline at end of file diff --git a/examples/smolagents/README.md b/examples/smolagents/README.md index bfe99cf30..851997148 100644 --- a/examples/smolagents/README.md +++ b/examples/smolagents/README.md @@ -1,32 +1,56 @@ -# SmolAgents Examples with AgentOps +# Smolagents Examples -This directory contains examples of using SmolAgents with AgentOps instrumentation. +This directory contains examples demonstrating how to use Smolagents with AgentOps for agent monitoring and observability. -## Prerequisites +## Examples -- Python >= 3.10 < 3.13 -- Install required dependencies: - ``` - pip install agentops smolagents - ``` +### 1. Simple Task Agent (`simple_task_agent.py`) +A minimal example showing how to create a single agent that can answer questions using web search. This is the best starting point for understanding Smolagents basics. -## Examples +**Features:** +- Basic agent setup with search capabilities +- AgentOps integration for tracking +- Error handling and session management + +### 2. Multi-Agent System (`multi_smolagents_system.py`) +A more complex example demonstrating a hierarchical multi-agent system with: +- Manager agent coordinating multiple specialized agents +- Web search agent with custom tools +- Code interpreter capabilities +- Tool creation and usage + +### 3. Text to SQL Agent (`text_to_sql.py`) +An example showing how to create an agent that can convert natural language queries into SQL statements. + +## Running the Examples -### 1. Multi-Agent System +1. 
Install dependencies: +```bash +pip install agentops smolagents python-dotenv +``` -Example: `multi_smolagents_system` -This example demonstrates: -- Multi-agent system coordination +2. Set up your API keys in a `.env` file: +```env +AGENTOPS_API_KEY=your_agentops_api_key_here +OPENAI_API_KEY=your_openai_api_key_here +``` -### 2. Text to SQL +3. Run any example: +```bash +python simple_task_agent.py +``` -Example: `text_to_sql` +4. View the results in your [AgentOps Dashboard](https://app.agentops.ai/sessions) -This example demonstrates: -- Natural language to SQL conversion +## Key Concepts -## AgentOps Integration +- **Agents**: AI assistants that can use tools and reason through problems +- **Tools**: Functions that agents can call to interact with external systems +- **Models**: LLM backends via LiteLLM (supports OpenAI, Anthropic, etc.) +- **AgentOps Integration**: Automatic tracking of all agent activities -These examples show how to use AgentOps to monitor and analyze your AI applications. AgentOps automatically instruments your SmolAgents calls to provide insights into performance, usage patterns, and model behavior. 
+## Learn More -To learn more about AgentOps, visit [https://www.agentops.ai](https://www.agentops.ai) +- [Smolagents Documentation](https://github.com/huggingface/smolagents) +- [AgentOps Documentation](https://docs.agentops.ai) +- [Full Integration Guide](https://docs.agentops.ai/v2/integrations/smolagents) From bd606b22dae49fc5f4ceb158407d478f676db8d8 Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Fri, 13 Jun 2025 02:11:43 +0530 Subject: [PATCH 11/13] fix: Add missing newline before InstrumentorLoader class definition --- agentops/instrumentation/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/agentops/instrumentation/__init__.py b/agentops/instrumentation/__init__.py index 85a7ff4ea..e45995144 100644 --- a/agentops/instrumentation/__init__.py +++ b/agentops/instrumentation/__init__.py @@ -416,6 +416,7 @@ def _import_monitor(name: str, globals_dict=None, locals_dict=None, fromlist=(), return module + @dataclass class InstrumentorLoader: """ From 2dae98c740cf0fdce26a761905a8ee684248fe00 Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Fri, 13 Jun 2025 02:19:41 +0530 Subject: [PATCH 12/13] feat: Add smolagents integration card to examples documentation --- docs/v2/examples/examples.mdx | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/v2/examples/examples.mdx b/docs/v2/examples/examples.mdx index 9021e70f6..20fec2e28 100644 --- a/docs/v2/examples/examples.mdx +++ b/docs/v2/examples/examples.mdx @@ -68,6 +68,9 @@ description: 'Examples of AgentOps with various integrations' LangChain callback handler integration + } iconType="image" iconType="solid" href="/v2/integrations/smolagents"> + Track HuggingFace's smolagents with AgentOps seamlessly + From de7338c86b34ffb0121197f139496f4b78a70e47 Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Fri, 13 Jun 2025 02:23:51 +0530 Subject: [PATCH 13/13] feat: Update examples and introduction documentation to include Smolagents integration --- docs/v2/examples/examples.mdx | 4 ++-- docs/v2/introduction.mdx | 
1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/v2/examples/examples.mdx b/docs/v2/examples/examples.mdx index 20fec2e28..c7aca55a7 100644 --- a/docs/v2/examples/examples.mdx +++ b/docs/v2/examples/examples.mdx @@ -44,7 +44,7 @@ description: 'Examples of AgentOps with various integrations' Multi-agent conversations with memory capabilities - + } iconType="image" href="/v2/examples/agno"> Modern AI agent framework with teams, workflows, and tool integration @@ -68,7 +68,7 @@ description: 'Examples of AgentOps with various integrations' LangChain callback handler integration - } iconType="image" iconType="solid" href="/v2/integrations/smolagents"> + } iconType="image" href="/v2/integrations/smolagents"> Track HuggingFace's smolagents with AgentOps seamlessly diff --git a/docs/v2/introduction.mdx b/docs/v2/introduction.mdx index 02c252328..1accbb705 100644 --- a/docs/v2/introduction.mdx +++ b/docs/v2/introduction.mdx @@ -35,6 +35,7 @@ description: "AgentOps is the developer favorite platform for testing, debugging } iconType="image" href="/v2/integrations/google_adk" /> } iconType="image" href="/v2/integrations/langchain" /> } iconType="image" href="/v2/integrations/agents_sdk" /> + } iconType="image" href="/v2/integrations/smolagents" /> Observability and monitoring for your AI agents and LLM apps. And we do it all in just two lines of code...