diff --git a/agentops/instrumentation/common/attributes.py b/agentops/instrumentation/common/attributes.py
index f267d615e..809121923 100644
--- a/agentops/instrumentation/common/attributes.py
+++ b/agentops/instrumentation/common/attributes.py
@@ -252,3 +252,27 @@ def get_base_span_attributes(span: Any) -> AttributeMap:
attributes[CoreAttributes.PARENT_ID] = parent_id
return attributes
+
+
+def extract_token_usage(response: Any) -> dict[str, int]:
+ """Extract token usage information from a response.
+
+ Args:
+ response: The response object to extract token usage from
+
+ Returns:
+ Dictionary containing token usage information
+ """
+ usage = {}
+
+ # Try to extract token counts from response
+ if hasattr(response, "usage"):
+ usage_data = response.usage
+ if hasattr(usage_data, "prompt_tokens"):
+ usage["prompt_tokens"] = usage_data.prompt_tokens
+ if hasattr(usage_data, "completion_tokens"):
+ usage["completion_tokens"] = usage_data.completion_tokens
+ if hasattr(usage_data, "total_tokens"):
+ usage["total_tokens"] = usage_data.total_tokens
+
+ return usage
diff --git a/agentops/instrumentation/smolagents/README.md b/agentops/instrumentation/smolagents/README.md
new file mode 100644
index 000000000..20d63ff37
--- /dev/null
+++ b/agentops/instrumentation/smolagents/README.md
@@ -0,0 +1,88 @@
+# SmoLAgents Instrumentation
+
+This module provides OpenTelemetry instrumentation for the smolagents framework. It captures telemetry data from model operations, agent executions, and tool usage.
+
+## Features
+
+- Model operation tracking
+ - Text generation
+ - Token usage
+ - Streaming responses
+ - Latency metrics
+
+- Agent execution monitoring
+ - Step-by-step execution
+ - Planning phases
+ - Tool usage
+ - Execution time
+
+- Tool usage analytics
+ - Tool call patterns
+ - Success/failure rates
+ - Execution time
+ - Error tracking
+
+## Usage
+
+```python
+from agentops import init
+from agentops.instrumentation.smolagents import SmolAgentsInstrumentor
+
+# Initialize AgentOps with your API key
+init(api_key="your-api-key")
+
+# The instrumentation will be automatically activated
+# All SmoLAgents operations will now be tracked
+```
+
+## Metrics Collected
+
+1. Token Usage
+ - Input tokens
+ - Output tokens
+ - Total tokens per operation
+
+2. Timing Metrics
+ - Operation duration
+ - Time to first token (streaming)
+ - Tool execution time
+ - Planning phase duration
+
+3. Agent Metrics
+ - Step counts
+ - Planning steps
+ - Tools used
+ - Success/failure rates
+
+4. Error Tracking
+ - Generation errors
+ - Tool execution errors
+ - Parsing errors
+
+## Architecture
+
+The instrumentation is built on OpenTelemetry and follows the same pattern as other AgentOps instrumentors:
+
+1. Attribute Extractors
+ - Model attributes
+ - Agent attributes
+ - Tool call attributes
+
+2. Wrappers
+ - Method wrappers for sync operations
+ - Stream wrappers for async operations
+ - Context propagation handling
+
+3. Metrics
+ - Histograms for distributions
+ - Counters for events
+ - Custom attributes for filtering
+
+## Contributing
+
+When adding new features or modifying existing ones:
+
+1. Follow the established pattern for attribute extraction
+2. Maintain context propagation
+3. Add appropriate error handling
+4. Update tests and documentation
\ No newline at end of file
diff --git a/agentops/instrumentation/smolagents/__init__.py b/agentops/instrumentation/smolagents/__init__.py
new file mode 100644
index 000000000..7eeda90f7
--- /dev/null
+++ b/agentops/instrumentation/smolagents/__init__.py
@@ -0,0 +1,8 @@
+"""SmoLAgents instrumentation for AgentOps."""
+
+LIBRARY_NAME = "smolagents"
+LIBRARY_VERSION = "1.16.0"
+
+from agentops.instrumentation.smolagents.instrumentor import SmolAgentsInstrumentor # noqa: E402
+
+__all__ = ["SmolAgentsInstrumentor"]
diff --git a/agentops/instrumentation/smolagents/attributes/agent.py b/agentops/instrumentation/smolagents/attributes/agent.py
new file mode 100644
index 000000000..d0f053a54
--- /dev/null
+++ b/agentops/instrumentation/smolagents/attributes/agent.py
@@ -0,0 +1,354 @@
+"""Attribute extractors for SmoLAgents agent operations."""
+
+from typing import Any, Dict, Optional, Tuple
+import uuid
+import json
+
+from agentops.instrumentation.common.attributes import get_common_attributes
+from agentops.semconv.agent import AgentAttributes
+from agentops.semconv.tool import ToolAttributes
+
+
+def get_agent_attributes(
+ args: Optional[Tuple] = None,
+ kwargs: Optional[Dict] = None,
+ return_value: Optional[Any] = None,
+) -> Dict[str, Any]:
+ """Extract attributes from an agent execution call.
+
+ Args:
+ args: Optional tuple of positional arguments
+ kwargs: Optional dict of keyword arguments
+ return_value: Optional return value from the function
+
+ Returns:
+ Dictionary of extracted attributes
+ """
+ attributes = get_common_attributes()
+
+ try:
+ # Extract agent instance information
+ agent_instance = None
+ if args and len(args) > 0:
+ agent_instance = args[0]
+ elif kwargs and "self" in kwargs:
+ agent_instance = kwargs["self"]
+
+ if agent_instance:
+ # Extract agent name
+ agent_name = getattr(agent_instance, "name", agent_instance.__class__.__name__)
+ attributes[AgentAttributes.AGENT_NAME] = agent_name
+
+ # Generate agent ID if not present
+ agent_id = getattr(agent_instance, "id", str(uuid.uuid4()))
+ attributes[AgentAttributes.AGENT_ID] = agent_id
+
+ # Extract agent role/type
+ attributes[AgentAttributes.AGENT_ROLE] = "executor"
+
+ # Extract tools information
+ tools = getattr(agent_instance, "tools", [])
+ if tools:
+ tool_names = []
+ for tool in tools:
+ tool_name = getattr(tool, "name", str(tool))
+ tool_names.append(tool_name)
+ attributes[AgentAttributes.AGENT_TOOLS] = json.dumps(tool_names)
+ else:
+ attributes[AgentAttributes.AGENT_TOOLS] = "[]"
+
+ # Extract managed agents information
+ managed_agents = getattr(agent_instance, "managed_agents", [])
+ if managed_agents:
+ managed_agent_names = []
+ for managed_agent in managed_agents:
+ agent_name = getattr(managed_agent, "name", managed_agent.__class__.__name__)
+ managed_agent_names.append(agent_name)
+ attributes[AgentAttributes.AGENT_MANAGED_AGENTS] = json.dumps(managed_agent_names)
+ else:
+ attributes[AgentAttributes.AGENT_MANAGED_AGENTS] = "[]"
+
+ # Extract input/task from args or kwargs
+ task_input = None
+ if args and len(args) > 1:
+ task_input = args[1]
+ elif kwargs and "task" in kwargs:
+ task_input = kwargs["task"]
+ elif kwargs and "prompt" in kwargs:
+ task_input = kwargs["prompt"]
+
+ if task_input:
+ attributes["agent.task"] = str(task_input)
+
+ # Extract return value/output
+ if return_value is not None:
+ attributes["agentops.entity.output"] = str(return_value)
+
+ except Exception:
+ # If extraction fails, continue with basic attributes
+ pass
+
+ return attributes
+
+
+def get_agent_stream_attributes(
+ args: Optional[Tuple] = None,
+ kwargs: Optional[Dict] = None,
+ return_value: Optional[Any] = None,
+) -> Dict[str, Any]:
+ """Extract attributes from an agent streaming call.
+
+ Args:
+ args: Optional tuple of positional arguments
+ kwargs: Optional dict of keyword arguments
+ return_value: Optional return value from the function
+
+ Returns:
+ Dictionary of extracted attributes
+ """
+ attributes = get_common_attributes()
+
+ try:
+ # Extract reasoning/task information
+ if kwargs:
+ if "max_steps" in kwargs:
+ attributes["agent.max_steps"] = str(kwargs["max_steps"])
+
+ # Extract task/reasoning from various parameter names
+ task_info = None
+ for param_name in ["task", "prompt", "reasoning", "query"]:
+ if param_name in kwargs:
+ task_info = kwargs[param_name]
+ break
+
+ if task_info:
+ attributes["agent.reasoning"] = str(task_info)
+
+ # Extract from args
+ if args and len(args) > 1:
+ task_info = args[1]
+ attributes["agent.reasoning"] = str(task_info)
+
+ except Exception:
+ # If extraction fails, continue with basic attributes
+ pass
+
+ return attributes
+
+
+def get_agent_step_attributes(
+ args: Optional[Tuple] = None,
+ kwargs: Optional[Dict] = None,
+ return_value: Optional[Any] = None,
+) -> Dict[str, Any]:
+ """Extract attributes from an agent step execution.
+
+ Args:
+ args: Optional tuple of positional arguments
+ kwargs: Optional dict of keyword arguments
+ return_value: Optional return value from the function
+
+ Returns:
+ Dictionary of extracted attributes
+ """
+ attributes = get_common_attributes()
+
+ try:
+ # Try to extract step information
+ step_number = getattr(args[0] if args else None, "step_count", None)
+ if step_number is not None:
+ attributes["agent.step_number"] = str(step_number)
+
+ # Extract step name/type
+ step_name = "ActionStep" # Default for smolagents
+ attributes["agent.name"] = step_name
+
+ # Extract return value
+ if return_value is not None:
+ attributes["agentops.entity.output"] = str(return_value)
+
+ except Exception:
+ # If extraction fails, continue with basic attributes
+ pass
+
+ return attributes
+
+
+def get_tool_call_attributes(
+ args: Optional[Tuple] = None,
+ kwargs: Optional[Dict] = None,
+ return_value: Optional[Any] = None,
+) -> Dict[str, Any]:
+ """Extract attributes from a tool call execution.
+
+ Args:
+ args: Optional tuple of positional arguments
+ kwargs: Optional dict of keyword arguments
+ return_value: Optional return value from the function
+
+ Returns:
+ Dictionary of extracted attributes
+ """
+ attributes = get_common_attributes()
+
+ try:
+ # Generate tool execution ID
+ tool_id = str(uuid.uuid4())
+ attributes[ToolAttributes.TOOL_ID] = tool_id
+
+ # Extract tool information from various sources
+ tool_name = "unknown"
+ tool_description = "unknown"
+ tool_parameters = "{}"
+
+ # Try to extract from instance (first arg)
+ if args and len(args) > 0:
+ instance = args[0]
+ if hasattr(instance, "name"):
+ tool_name = instance.name
+ if hasattr(instance, "description"):
+ tool_description = instance.description
+
+ # Try to extract from kwargs
+ if kwargs:
+ if "tool_call" in kwargs:
+ tool_call = kwargs["tool_call"]
+ if hasattr(tool_call, "function"):
+ tool_name = tool_call.function.name
+ if hasattr(tool_call.function, "arguments"):
+ tool_parameters = tool_call.function.arguments
+ elif "name" in kwargs:
+ tool_name = kwargs["name"]
+ elif "function_name" in kwargs:
+ tool_name = kwargs["function_name"]
+
+ # Extract parameters
+ if "parameters" in kwargs:
+ tool_parameters = json.dumps(kwargs["parameters"])
+ elif "arguments" in kwargs:
+ tool_parameters = json.dumps(kwargs["arguments"])
+ elif "args" in kwargs:
+ tool_parameters = json.dumps(kwargs["args"])
+
+ # Set tool attributes
+ attributes[ToolAttributes.TOOL_NAME] = tool_name
+ attributes[ToolAttributes.TOOL_DESCRIPTION] = tool_description
+ attributes[ToolAttributes.TOOL_PARAMETERS] = tool_parameters
+ attributes[ToolAttributes.TOOL_STATUS] = "pending"
+ attributes[ToolAttributes.TOOL_OUTPUT_TYPE] = "unknown"
+ attributes[ToolAttributes.TOOL_INPUTS] = "{}"
+
+ # Extract return value
+ if return_value is not None:
+ attributes["tool.result"] = str(return_value)
+ attributes[ToolAttributes.TOOL_STATUS] = "success"
+
+ except Exception:
+ # If extraction fails, set basic attributes
+ attributes[ToolAttributes.TOOL_NAME] = "unknown"
+ attributes[ToolAttributes.TOOL_DESCRIPTION] = "unknown"
+ attributes[ToolAttributes.TOOL_ID] = str(uuid.uuid4())
+ attributes[ToolAttributes.TOOL_PARAMETERS] = "{}"
+ attributes[ToolAttributes.TOOL_STATUS] = "pending"
+
+ return attributes
+
+
+def get_planning_step_attributes(
+ args: Optional[Tuple] = None,
+ kwargs: Optional[Dict] = None,
+ return_value: Optional[Any] = None,
+) -> Dict[str, Any]:
+ """Extract attributes from a planning step execution.
+
+ Args:
+ args: Optional tuple of positional arguments
+ kwargs: Optional dict of keyword arguments
+ return_value: Optional return value from the function
+
+ Returns:
+ Dictionary of extracted attributes
+ """
+ attributes = get_common_attributes()
+
+ try:
+ # Extract planning information
+ if kwargs:
+ if "planning_step" in kwargs:
+ step = kwargs["planning_step"]
+ attributes["agent.planning.step"] = str(step)
+ if "reasoning" in kwargs:
+ attributes["agent.planning.reasoning"] = str(kwargs["reasoning"])
+
+ # Extract return value
+ if return_value is not None:
+ attributes["agentops.entity.output"] = str(return_value)
+
+ except Exception:
+ # If extraction fails, continue with basic attributes
+ pass
+
+ return attributes
+
+
+def get_managed_agent_attributes(
+ args: Optional[Tuple] = None,
+ kwargs: Optional[Dict] = None,
+ return_value: Optional[Any] = None,
+) -> Dict[str, Any]:
+ """Extract attributes from a managed agent call.
+
+ Args:
+ args: Optional tuple of positional arguments
+ kwargs: Optional dict of keyword arguments
+ return_value: Optional return value from the function
+
+ Returns:
+ Dictionary of extracted attributes
+ """
+ attributes = get_common_attributes()
+
+ try:
+ # Extract managed agent information
+ agent_instance = None
+ if args and len(args) > 0:
+ agent_instance = args[0]
+ elif kwargs and "agent" in kwargs:
+ agent_instance = kwargs["agent"]
+
+ if agent_instance:
+ # Extract agent details
+ agent_name = getattr(agent_instance, "name", agent_instance.__class__.__name__)
+ agent_id = getattr(agent_instance, "id", str(uuid.uuid4()))
+ agent_description = getattr(agent_instance, "description", "")
+
+ attributes[AgentAttributes.AGENT_NAME] = agent_name
+ attributes[AgentAttributes.AGENT_ID] = agent_id
+ attributes[AgentAttributes.AGENT_ROLE] = "managed"
+ attributes[AgentAttributes.AGENT_TYPE] = agent_instance.__class__.__name__
+
+ if agent_description:
+ attributes[AgentAttributes.AGENT_DESCRIPTION] = agent_description
+
+ # Check if this agent provides run summaries
+ attributes["agent.provide_run_summary"] = "false" # Default for smolagents
+
+ # Extract task information
+ task = None
+ if args and len(args) > 1:
+ task = args[1]
+ elif kwargs and "task" in kwargs:
+ task = kwargs["task"]
+
+ if task:
+ attributes["agent.task"] = str(task)
+
+ # Extract return value
+ if return_value is not None:
+ attributes["agentops.entity.output"] = str(return_value)
+
+ except Exception:
+ # If extraction fails, continue with basic attributes
+ pass
+
+ return attributes
diff --git a/agentops/instrumentation/smolagents/attributes/model.py b/agentops/instrumentation/smolagents/attributes/model.py
new file mode 100644
index 000000000..15513babf
--- /dev/null
+++ b/agentops/instrumentation/smolagents/attributes/model.py
@@ -0,0 +1,205 @@
+"""Attribute extractors for SmoLAgents model operations."""
+
+from typing import Any, Dict, Optional, Tuple
+import json
+
+from agentops.instrumentation.common.attributes import (
+ get_common_attributes,
+)
+from agentops.semconv.message import MessageAttributes
+from agentops.semconv.span_attributes import SpanAttributes
+
+
+def get_model_attributes(
+ args: Optional[Tuple] = None,
+ kwargs: Optional[Dict] = None,
+ return_value: Optional[Any] = None,
+) -> Dict[str, Any]:
+ """Extract attributes from a model generation call.
+
+ Args:
+ args: Optional tuple of positional arguments
+ kwargs: Optional dict of keyword arguments
+ return_value: Optional return value from the function
+
+ Returns:
+ Dictionary of extracted attributes
+ """
+ attributes = get_common_attributes()
+
+ try:
+ # Extract model name from various sources
+ model_name = "unknown"
+
+ # Try to get from kwargs
+ if kwargs:
+ if "model" in kwargs:
+ model_name = kwargs["model"]
+ elif kwargs.get("self") and hasattr(kwargs["self"], "model_id"):
+ model_name = kwargs["self"].model_id
+
+ # Try to get from args (instance is usually first arg in methods)
+ if model_name == "unknown" and args and len(args) > 0:
+ instance = args[0]
+ if hasattr(instance, "model_id"):
+ model_name = instance.model_id
+
+ # Set model attributes
+ attributes[SpanAttributes.LLM_REQUEST_MODEL] = model_name
+
+ # Extract messages from kwargs
+ if kwargs and "messages" in kwargs:
+ messages = kwargs["messages"]
+ if isinstance(messages, list):
+ for i, message in enumerate(messages):
+ message_dict = message
+ if hasattr(message, "to_dict"):
+ message_dict = message.to_dict()
+ elif hasattr(message, "__dict__"):
+ message_dict = message.__dict__
+
+ if isinstance(message_dict, dict):
+ # Set role
+ role = message_dict.get("role", "user")
+ attributes[MessageAttributes.PROMPT_ROLE.format(i=i)] = role
+
+ # Set content
+ content = message_dict.get("content", "")
+ if content:
+ attributes[MessageAttributes.PROMPT_CONTENT.format(i=i)] = str(content)
+
+ # Extract tools from kwargs
+ if kwargs and "tools_to_call_from" in kwargs:
+ tools = kwargs["tools_to_call_from"]
+ if tools and isinstance(tools, list):
+ for i, tool in enumerate(tools):
+ tool_name = getattr(tool, "name", "unknown")
+ tool_description = getattr(tool, "description", "")
+
+ attributes[MessageAttributes.TOOL_CALL_NAME.format(i=i)] = tool_name
+ if tool_description:
+ attributes[MessageAttributes.TOOL_CALL_DESCRIPTION.format(i=i)] = tool_description
+
+ # Extract additional parameters
+ if kwargs:
+ if "temperature" in kwargs:
+ attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] = kwargs["temperature"]
+ if "max_tokens" in kwargs:
+ attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] = kwargs["max_tokens"]
+ if "stop_sequences" in kwargs:
+ attributes[SpanAttributes.LLM_REQUEST_STOP_SEQUENCES] = json.dumps(kwargs["stop_sequences"])
+
+ # Extract response attributes
+ if return_value:
+ try:
+ # Handle ChatMessage response
+ if hasattr(return_value, "content"):
+ attributes[MessageAttributes.COMPLETION_CONTENT.format(i=0)] = str(return_value.content)
+ if hasattr(return_value, "role"):
+ attributes[MessageAttributes.COMPLETION_ROLE.format(i=0)] = return_value.role
+
+ # Handle tool calls in response
+ if hasattr(return_value, "tool_calls") and return_value.tool_calls:
+ for j, tool_call in enumerate(return_value.tool_calls):
+ if hasattr(tool_call, "function"):
+ attributes[
+ MessageAttributes.COMPLETION_TOOL_CALL_NAME.format(i=0, j=j)
+ ] = tool_call.function.name
+ if hasattr(tool_call.function, "arguments"):
+ attributes[
+ MessageAttributes.COMPLETION_TOOL_CALL_ARGUMENTS.format(i=0, j=j)
+ ] = tool_call.function.arguments
+ if hasattr(tool_call, "id"):
+ attributes[MessageAttributes.COMPLETION_TOOL_CALL_ID.format(i=0, j=j)] = tool_call.id
+
+ # Extract token usage
+ if hasattr(return_value, "token_usage") and return_value.token_usage:
+ token_usage = return_value.token_usage
+ if hasattr(token_usage, "input_tokens"):
+ attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = token_usage.input_tokens
+ if hasattr(token_usage, "output_tokens"):
+ attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = token_usage.output_tokens
+ if hasattr(token_usage, "total_tokens"):
+ attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = token_usage.total_tokens
+
+ # Extract response ID
+ if hasattr(return_value, "raw") and return_value.raw:
+ raw_response = return_value.raw
+ if hasattr(raw_response, "id"):
+ attributes[SpanAttributes.LLM_RESPONSE_ID] = raw_response.id
+
+ except Exception:
+ # If we can't extract response attributes, continue with what we have
+ pass
+
+ except Exception:
+ # If extraction fails, return basic attributes
+ pass
+
+ return attributes
+
+
+def get_stream_attributes(
+ args: Optional[Tuple] = None,
+ kwargs: Optional[Dict] = None,
+ return_value: Optional[Any] = None,
+) -> Dict[str, Any]:
+ """Extract attributes from a streaming model generation call.
+
+ Args:
+ args: Optional tuple of positional arguments
+ kwargs: Optional dict of keyword arguments
+ return_value: Optional return value from the function
+
+ Returns:
+ Dictionary of extracted attributes
+ """
+ attributes = get_common_attributes()
+
+ try:
+ # Extract model name
+ model_name = "unknown"
+ if kwargs and kwargs.get("self") and hasattr(kwargs["self"], "model_id"):
+ model_name = kwargs["self"].model_id
+ elif args and len(args) > 0 and hasattr(args[0], "model_id"):
+ model_name = args[0].model_id
+
+ attributes[SpanAttributes.LLM_REQUEST_MODEL] = model_name
+ attributes["gen_ai.request.streaming"] = True
+
+ # Extract messages for streaming
+ if kwargs and "messages" in kwargs:
+ messages = kwargs["messages"]
+ if isinstance(messages, list):
+ for i, message in enumerate(messages):
+ message_dict = message
+ if hasattr(message, "to_dict"):
+ message_dict = message.to_dict()
+ elif hasattr(message, "__dict__"):
+ message_dict = message.__dict__
+
+ if isinstance(message_dict, dict):
+ role = message_dict.get("role", "user")
+ attributes[MessageAttributes.PROMPT_ROLE.format(i=i)] = role
+
+ content = message_dict.get("content", "")
+ if content:
+ attributes[MessageAttributes.PROMPT_CONTENT.format(i=i)] = str(content)
+
+ # Extract tools for streaming
+ if kwargs and "tools_to_call_from" in kwargs:
+ tools = kwargs["tools_to_call_from"]
+ if tools and isinstance(tools, list):
+ for i, tool in enumerate(tools):
+ tool_name = getattr(tool, "name", "unknown")
+ tool_description = getattr(tool, "description", "")
+
+ attributes[MessageAttributes.TOOL_CALL_NAME.format(i=i)] = tool_name
+ if tool_description:
+ attributes[MessageAttributes.TOOL_CALL_DESCRIPTION.format(i=i)] = tool_description
+
+ except Exception:
+ # If extraction fails, return basic attributes
+ pass
+
+ return attributes
diff --git a/agentops/instrumentation/smolagents/instrumentor.py b/agentops/instrumentation/smolagents/instrumentor.py
new file mode 100644
index 000000000..37b45b750
--- /dev/null
+++ b/agentops/instrumentation/smolagents/instrumentor.py
@@ -0,0 +1,242 @@
+"""SmoLAgents instrumentation for AgentOps."""
+
+from typing import Collection
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from opentelemetry.trace import get_tracer, SpanKind
+from wrapt import wrap_function_wrapper
+
+from agentops.instrumentation.common.wrappers import unwrap
+
+# Define LIBRARY_VERSION directly to avoid circular import
+LIBRARY_VERSION = "1.16.0"
+
+# Import attribute handlers
+try:
+ from agentops.instrumentation.smolagents.attributes.agent import (
+ get_agent_attributes,
+ get_tool_call_attributes,
+ get_planning_step_attributes,
+ get_agent_step_attributes,
+ get_agent_stream_attributes,
+ get_managed_agent_attributes,
+ )
+ from agentops.instrumentation.smolagents.attributes.model import (
+ get_model_attributes,
+ get_stream_attributes,
+ )
+except ImportError:
+ # Fallback functions if imports fail
+ def get_agent_attributes(*args, **kwargs):
+ return {}
+
+ def get_tool_call_attributes(*args, **kwargs):
+ return {}
+
+ def get_planning_step_attributes(*args, **kwargs):
+ return {}
+
+ def get_agent_step_attributes(*args, **kwargs):
+ return {}
+
+ def get_agent_stream_attributes(*args, **kwargs):
+ return {}
+
+ def get_managed_agent_attributes(*args, **kwargs):
+ return {}
+
+ def get_model_attributes(*args, **kwargs):
+ return {}
+
+ def get_stream_attributes(*args, **kwargs):
+ return {}
+
+
+class SmolAgentsInstrumentor(BaseInstrumentor):
+ """Instrumentor for SmoLAgents library."""
+
+ def instrumentation_dependencies(self) -> Collection[str]:
+ return (
+ "smolagents >= 1.0.0",
+ "litellm",
+ )
+
+ def _instrument(self, **kwargs):
+ """Instrument SmoLAgents with AgentOps telemetry."""
+ tracer_provider = kwargs.get("tracer_provider")
+ tracer = get_tracer(__name__, LIBRARY_VERSION, tracer_provider)
+
+ # Core agent operations
+ wrap_function_wrapper("smolagents.agents", "CodeAgent.run", self._agent_run_wrapper(tracer))
+
+ wrap_function_wrapper("smolagents.agents", "ToolCallingAgent.run", self._agent_run_wrapper(tracer))
+
+ # Tool calling operations
+ wrap_function_wrapper(
+ "smolagents.agents", "ToolCallingAgent.execute_tool_call", self._tool_execution_wrapper(tracer)
+ )
+
+ # Model operations with proper model name extraction
+ wrap_function_wrapper("smolagents.models", "LiteLLMModel.generate", self._llm_wrapper(tracer))
+
+ wrap_function_wrapper("smolagents.models", "LiteLLMModel.generate_stream", self._llm_wrapper(tracer))
+
+ def _agent_run_wrapper(self, tracer):
+ """Wrapper for agent run methods."""
+
+ def wrapper(wrapped, instance, args, kwargs):
+ # Get proper agent name - handle None case
+ agent_name = getattr(instance, "name", None)
+ if not agent_name: # Handle None, empty string, or missing attribute
+ agent_name = instance.__class__.__name__
+
+ span_name = f"{agent_name}.run"
+
+ with tracer.start_as_current_span(
+ span_name,
+ kind=SpanKind.CLIENT,
+ ) as span:
+ # Extract attributes
+ attributes = get_agent_attributes(args=(instance,) + args, kwargs=kwargs)
+
+ # Fix managed agents attribute
+ if hasattr(instance, "managed_agents") and instance.managed_agents:
+ managed_agent_names = []
+ for agent in instance.managed_agents:
+ name = getattr(agent, "name", None)
+ if not name: # Handle None case for managed agents too
+ name = agent.__class__.__name__
+ managed_agent_names.append(name)
+ attributes["agent.managed_agents"] = str(managed_agent_names)
+ else:
+ attributes["agent.managed_agents"] = "[]"
+
+ for key, value in attributes.items():
+ if value is not None:
+ span.set_attribute(key, value)
+
+ try:
+ result = wrapped(*args, **kwargs)
+
+ # Set output attribute
+ if result is not None:
+ span.set_attribute("agentops.entity.output", str(result))
+
+ return result
+ except Exception as e:
+ span.record_exception(e)
+ raise
+
+ return wrapper
+
+ def _tool_execution_wrapper(self, tracer):
+ """Wrapper for tool execution methods."""
+
+ def wrapper(wrapped, instance, args, kwargs):
+ # Extract tool name for better span naming
+ tool_name = "unknown"
+ if args and len(args) > 0:
+ tool_call = args[0]
+ if hasattr(tool_call, "function"):
+ tool_name = tool_call.function.name
+
+ span_name = f"tool.{tool_name}" if tool_name != "unknown" else "tool.execute"
+
+ with tracer.start_as_current_span(
+ span_name,
+ kind=SpanKind.CLIENT,
+ ) as span:
+ # Extract tool information from kwargs or args
+ tool_params = "{}"
+
+ # Try to extract tool call information
+ if args and len(args) > 0:
+ tool_call = args[0]
+ if hasattr(tool_call, "function"):
+ if hasattr(tool_call.function, "arguments"):
+ tool_params = str(tool_call.function.arguments)
+
+ # Extract attributes
+ attributes = get_tool_call_attributes(args=(instance,) + args, kwargs=kwargs)
+
+ # Override with better tool information if available
+ if tool_name != "unknown":
+ attributes["tool.name"] = tool_name
+ attributes["tool.parameters"] = tool_params
+
+ for key, value in attributes.items():
+ if value is not None:
+ span.set_attribute(key, value)
+
+ try:
+ result = wrapped(*args, **kwargs)
+
+ # Set success status and result
+ span.set_attribute("tool.status", "success")
+ if result is not None:
+ span.set_attribute("tool.result", str(result))
+
+ return result
+ except Exception as e:
+ span.set_attribute("tool.status", "error")
+ span.record_exception(e)
+ raise
+
+ return wrapper
+
+ def _llm_wrapper(self, tracer):
+ """Wrapper for LLM generation methods with proper model name extraction."""
+
+ def wrapper(wrapped, instance, args, kwargs):
+ # Extract model name from instance
+ model_name = getattr(instance, "model_id", "unknown")
+
+ # Determine if this is streaming
+ is_streaming = "generate_stream" in wrapped.__name__
+ operation = "generate_stream" if is_streaming else "generate"
+ span_name = f"litellm.{operation} ({model_name})" if model_name != "unknown" else f"litellm.{operation}"
+
+ with tracer.start_as_current_span(
+ span_name,
+ kind=SpanKind.CLIENT,
+ ) as span:
+ # Extract attributes
+ if is_streaming:
+ attributes = get_stream_attributes(args=(instance,) + args, kwargs=kwargs)
+ else:
+ attributes = get_model_attributes(args=(instance,) + args, kwargs=kwargs)
+
+ # Ensure model name is properly set
+ attributes["gen_ai.request.model"] = model_name
+
+ for key, value in attributes.items():
+ if value is not None:
+ span.set_attribute(key, value)
+
+ try:
+ result = wrapped(*args, **kwargs)
+
+ # Extract response attributes if available
+ if result and hasattr(result, "content"):
+ span.set_attribute("gen_ai.completion.0.content", str(result.content))
+ if result and hasattr(result, "token_usage"):
+ token_usage = result.token_usage
+ if hasattr(token_usage, "input_tokens"):
+ span.set_attribute("gen_ai.usage.prompt_tokens", token_usage.input_tokens)
+ if hasattr(token_usage, "output_tokens"):
+ span.set_attribute("gen_ai.usage.completion_tokens", token_usage.output_tokens)
+
+ return result
+ except Exception as e:
+ span.record_exception(e)
+ raise
+
+ return wrapper
+
+ def _uninstrument(self, **kwargs):
+ """Remove instrumentation."""
+ # Unwrap all instrumented methods
+ unwrap("smolagents.agents", "CodeAgent.run")
+ unwrap("smolagents.agents", "ToolCallingAgent.run")
+ unwrap("smolagents.agents", "ToolCallingAgent.execute_tool_call")
+ unwrap("smolagents.models", "LiteLLMModel.generate")
+ unwrap("smolagents.models", "LiteLLMModel.generate_stream")
diff --git a/agentops/instrumentation/smolagents/stream_wrapper.py b/agentops/instrumentation/smolagents/stream_wrapper.py
new file mode 100644
index 000000000..fb33a5473
--- /dev/null
+++ b/agentops/instrumentation/smolagents/stream_wrapper.py
@@ -0,0 +1,258 @@
+"""Stream wrapper for SmoLAgents model streaming responses."""
+
+import time
+import uuid
+from typing import Any, Generator, Optional
+from opentelemetry.trace import Status, StatusCode, Span
+
+from agentops.semconv.message import MessageAttributes
+from agentops.semconv.agent import AgentAttributes
+from agentops.semconv.tool import ToolAttributes
+from .attributes.model import get_stream_attributes
+from agentops.semconv.span_attributes import SpanAttributes
+
+
+def model_stream_wrapper(tracer):
+ """Wrapper for model streaming methods.
+
+ Args:
+ tracer: OpenTelemetry tracer
+
+ Returns:
+ Wrapped function
+ """
+
+ def wrapper(wrapped, instance, args, kwargs):
+ messages = kwargs.get("messages", [])
+ model_id = instance.model_id if hasattr(instance, "model_id") else "unknown"
+
+ with tracer.start_as_current_span(
+ name=f"{model_id}.generate_stream", attributes=get_stream_attributes(model_id=model_id, messages=messages)
+ ) as span:
+ try:
+ # Start streaming
+ stream = wrapped(*args, **kwargs)
+ first_token_received = False
+ start_time = time.time()
+ accumulated_text = ""
+
+ # Process stream
+ for chunk in stream:
+ if not first_token_received:
+ first_token_received = True
+ span.set_attribute("gen_ai.time_to_first_token", time.time() - start_time)
+
+ # Accumulate text and update attributes
+ if hasattr(chunk, "content") and chunk.content:
+ accumulated_text += chunk.content
+ span.set_attribute(MessageAttributes.COMPLETION_CONTENT.format(i=0), accumulated_text)
+ span.set_attribute(MessageAttributes.COMPLETION_TYPE.format(i=0), "text")
+
+ yield chunk
+
+ # Set final attributes
+ span.set_attribute("gen_ai.streaming_duration", time.time() - start_time)
+ span.set_status(Status(StatusCode.OK))
+
+ except Exception as e:
+ span.set_status(Status(StatusCode.ERROR, str(e)))
+ raise
+
+ return wrapper
+
+
+def agent_stream_wrapper(tracer):
+ """Wrapper for agent streaming methods.
+
+ Args:
+ tracer: OpenTelemetry tracer
+
+ Returns:
+ Wrapped function
+ """
+
+ def wrapper(wrapped, instance, args, kwargs):
+ task = kwargs.get("task", args[0] if args else "unknown")
+ agent_type = instance.__class__.__name__
+ agent_id = str(uuid.uuid4())
+
+ with tracer.start_as_current_span(
+ name=f"{agent_type}.run_stream",
+ attributes={
+ AgentAttributes.AGENT_ID: agent_id,
+ AgentAttributes.AGENT_NAME: agent_type,
+ AgentAttributes.AGENT_ROLE: "executor",
+ AgentAttributes.AGENT_REASONING: task,
+ },
+ ) as span:
+ try:
+ # Initialize counters
+ step_count = 0
+ planning_steps = 0
+ tools_used = set()
+ start_time = time.time()
+
+ # Process stream
+ stream = wrapped(*args, **kwargs)
+ for step in stream:
+ step_count += 1
+
+ # Track step types
+ if hasattr(step, "type"):
+ if step.type == "planning":
+ planning_steps += 1
+ elif step.type == "tool_call":
+ tools_used.add(step.tool_name)
+ # Add tool-specific attributes
+ span.set_attribute(ToolAttributes.TOOL_NAME, step.tool_name)
+ if hasattr(step, "arguments"):
+ span.set_attribute(ToolAttributes.TOOL_PARAMETERS, step.arguments)
+
+ # Update span attributes
+ span.set_attribute("agent.step_count", step_count)
+ span.set_attribute("agent.planning_steps", planning_steps)
+ span.set_attribute(AgentAttributes.AGENT_TOOLS, list(tools_used))
+
+ yield step
+
+ # Set final attributes
+ span.set_attribute("agent.execution_time", time.time() - start_time)
+ span.set_status(Status(StatusCode.OK))
+
+ except Exception as e:
+ span.set_status(Status(StatusCode.ERROR, str(e)))
+ raise
+
+ return wrapper
+
+
+class SmoLAgentsStreamWrapper:
+ """Wrapper for streaming responses from SmoLAgents models."""
+
+ def __init__(
+ self,
+ stream: Generator,
+ span: Span,
+ model_id: Optional[str] = None,
+ ):
+ """Initialize the stream wrapper.
+
+ Args:
+ stream: The original generator from the model
+ span: The OpenTelemetry span to track the stream
+ model_id: Optional model identifier
+ """
+ self._stream = stream
+ self._span = span
+ self._model_id = model_id
+ self._chunks_received = 0
+ self._full_content = []
+ self._tool_calls = []
+ self._current_tool_call = None
+ self._token_count = 0
+
+ def __iter__(self):
+ """Iterate over the stream."""
+ return self
+
+ def __next__(self):
+ """Get the next chunk from the stream."""
+ try:
+ chunk = next(self._stream)
+ self._process_chunk(chunk)
+ return chunk
+ except StopIteration:
+ self._finalize_stream()
+ raise
+
+ def _process_chunk(self, chunk: Any) -> None:
+ """Process a chunk from the stream.
+
+ Args:
+ chunk: The chunk to process
+ """
+ self._chunks_received += 1
+
+ # Handle ChatMessageStreamDelta objects
+ if hasattr(chunk, "content") and chunk.content:
+ self._full_content.append(chunk.content)
+
+ # Handle tool calls in chunks
+ if hasattr(chunk, "tool_calls") and chunk.tool_calls:
+ for tool_call in chunk.tool_calls:
+ if tool_call.id not in [tc["id"] for tc in self._tool_calls]:
+ self._tool_calls.append(
+ {
+ "id": tool_call.id,
+ "type": tool_call.type,
+ "name": tool_call.function.name,
+ "arguments": tool_call.function.arguments,
+ }
+ )
+
+ # Track token usage if available
+ if hasattr(chunk, "token_usage") and chunk.token_usage:
+ if hasattr(chunk.token_usage, "output_tokens"):
+ self._token_count += chunk.token_usage.output_tokens
+
+ # Update span with chunk information
+ self._span.add_event(
+ "stream_chunk_received",
+ {
+ "chunk_number": self._chunks_received,
+ "chunk_content_length": len(chunk.content) if hasattr(chunk, "content") and chunk.content else 0,
+ },
+ )
+
+ def _finalize_stream(self) -> None:
+ """Finalize the stream and update span attributes."""
+ # Combine all content chunks
+ full_content = "".join(self._full_content)
+
+ # Set final attributes on the span
+ attributes = {
+ MessageAttributes.COMPLETION_CONTENT.format(i=0): full_content,
+ "stream.chunks_received": self._chunks_received,
+ "stream.total_content_length": len(full_content),
+ }
+
+ # Add tool calls if any
+ if self._tool_calls:
+ for j, tool_call in enumerate(self._tool_calls):
+ attributes.update(
+ {
+ MessageAttributes.COMPLETION_TOOL_CALL_ID.format(i=0, j=j): tool_call["id"],
+ MessageAttributes.COMPLETION_TOOL_CALL_TYPE.format(i=0, j=j): tool_call["type"],
+ MessageAttributes.COMPLETION_TOOL_CALL_NAME.format(i=0, j=j): tool_call["name"],
+ MessageAttributes.COMPLETION_TOOL_CALL_ARGUMENTS.format(i=0, j=j): str(tool_call["arguments"]),
+ }
+ )
+
+ # Add token usage if tracked
+ if self._token_count > 0:
+ attributes[SpanAttributes.LLM_USAGE_STREAMING_TOKENS] = self._token_count
+
+ self._span.set_attributes(attributes)
+
+ def close(self) -> None:
+ """Close the stream wrapper."""
+ if hasattr(self._stream, "close"):
+ self._stream.close()
+
+
+def wrap_stream(
+ stream: Generator,
+ span: Span,
+ model_id: Optional[str] = None,
+) -> SmoLAgentsStreamWrapper:
+ """Wrap a streaming response from a SmoLAgents model.
+
+ Args:
+ stream: The original generator from the model
+ span: The OpenTelemetry span to track the stream
+ model_id: Optional model identifier
+
+ Returns:
+ SmoLAgentsStreamWrapper: The wrapped stream
+ """
+ return SmoLAgentsStreamWrapper(stream, span, model_id)
diff --git a/docs/mint.json b/docs/mint.json
index 1784b16f7..ae2a86df8 100644
--- a/docs/mint.json
+++ b/docs/mint.json
@@ -178,6 +178,7 @@
"v2/integrations/litellm",
"v2/integrations/openai",
"v2/integrations/agents_sdk",
+ "v2/integrations/smolagents",
"v2/integrations/ibm_watsonx_ai",
"v2/integrations/xai"
],
diff --git a/docs/v2/examples/examples.mdx b/docs/v2/examples/examples.mdx
index 9021e70f6..c7aca55a7 100644
--- a/docs/v2/examples/examples.mdx
+++ b/docs/v2/examples/examples.mdx
@@ -44,7 +44,7 @@ description: 'Examples of AgentOps with various integrations'
Multi-agent conversations with memory capabilities
-
+ } iconType="image" href="/v2/examples/agno">
Modern AI agent framework with teams, workflows, and tool integration
@@ -68,6 +68,9 @@ description: 'Examples of AgentOps with various integrations'
LangChain callback handler integration
+ } iconType="image" href="/v2/integrations/smolagents">
+ Track HuggingFace's smolagents with AgentOps seamlessly
+
diff --git a/docs/v2/examples/smolagents.mdx b/docs/v2/examples/smolagents.mdx
new file mode 100644
index 000000000..498a6cc43
--- /dev/null
+++ b/docs/v2/examples/smolagents.mdx
@@ -0,0 +1,145 @@
+---
+title: 'Smolagents'
+description: 'Orchestrate a Multi-Agent System'
+---
+{/* SOURCE_FILE: examples/smolagents/multi_smolagents_system.ipynb */}
+
+_View Notebook on GitHub_
+
+# Orchestrate a Multi-Agent System
+
+In this notebook, we will make a multi-agent web browser: an agentic system with several agents collaborating to solve problems using the web!
+Run the line below to install the required dependencies:
+
+
+
+## Installation
+
+ ```bash pip
+ pip install agentops duckduckgo-search markdownify smolagents
+ ```
+ ```bash poetry
+ poetry add agentops duckduckgo-search markdownify smolagents
+ ```
+ ```bash uv
+ uv add agentops duckduckgo-search markdownify smolagents
+ ```
+
+
+🖇️ Now we initialize the AgentOps client and load the environment variables to use the API keys.
+
+
+```python
+import agentops
+from dotenv import load_dotenv
+import os
+import re
+import requests
+from markdownify import markdownify
+from requests.exceptions import RequestException
+
+load_dotenv()
+os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY", "your_api_key_here")
+os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "your_openai_api_key_here")
+```
+
+⚡️ Our agent will be powered by `openai/gpt-4o-mini` using the `LiteLLMModel` class.
+
+
+```python
+agentops.init(auto_start_session=False)
+tracer = agentops.start_trace(
+ trace_name="Orchestrate a Multi-Agent System", tags=["smolagents", "example", "multi-agent", "agentops-example"]
+)
+from smolagents import LiteLLMModel, tool, CodeAgent, ToolCallingAgent, DuckDuckGoSearchTool
+model = LiteLLMModel("openai/gpt-4o-mini")
+```
+
+## Create a Web Search Tool
+
+For web browsing, we can already use our pre-existing `DuckDuckGoSearchTool`. However, we will also create a `VisitWebpageTool` from scratch using `markdownify`. Here’s how:
+
+
+```python
+@tool
+def visit_webpage(url: str) -> str:
+ """Visits a webpage at the given URL and returns its content as a markdown string.
+
+ Args:
+ url: The URL of the webpage to visit.
+
+ Returns:
+ The content of the webpage converted to Markdown, or an error message if the request fails.
+ """
+ try:
+ # Send a GET request to the URL
+ response = requests.get(url)
+ response.raise_for_status() # Raise an exception for bad status codes
+
+ # Convert the HTML content to Markdown
+ markdown_content = markdownify(response.text).strip()
+
+ # Remove multiple line breaks
+ markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)
+
+ return markdown_content
+
+ except RequestException as e:
+ return f"Error fetching the webpage: {str(e)}"
+ except Exception as e:
+ return f"An unexpected error occurred: {str(e)}"
+```
+
+Let’s test our tool:
+
+
+```python
+print(visit_webpage("https://en.wikipedia.org/wiki/Hugging_Face")[:500])
+```
+
+## Build Our Multi-Agent System
+
+We will now use the tools `search` and `visit_webpage` to create the web agent.
+
+
+```python
+web_agent = ToolCallingAgent(
+ tools=[DuckDuckGoSearchTool(), visit_webpage],
+ model=model,
+ name="search",
+ description="Runs web searches for you. Give it your query as an argument.",
+)
+
+manager_agent = CodeAgent(
+ tools=[],
+ model=model,
+ managed_agents=[web_agent],
+ additional_authorized_imports=["time", "numpy", "pandas"],
+)
+```
+
+Let’s run our system with the following query:
+
+
+```python
+answer = manager_agent.run(
+ "If LLM trainings continue to scale up at the current rhythm until 2030, what would be the electric power in GW required to power the biggest training runs by 2030? What does that correspond to, compared to some countries? Please provide a source for any number used."
+)
+
+print(answer)
+```
+
+Awesome! We've successfully run a multi-agent system. Let's end the AgentOps session with a "Success" state. You can also end the session with a "Failure" state; if no state is specified, it defaults to "Indeterminate".
+
+
+```python
+agentops.end_trace(tracer, end_state="Success")
+```
+
+You can view the session in the [AgentOps dashboard](https://app.agentops.ai/sessions) by clicking the link provided after ending the session.
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/v2/integrations/smolagents.mdx b/docs/v2/integrations/smolagents.mdx
new file mode 100644
index 000000000..639b36edc
--- /dev/null
+++ b/docs/v2/integrations/smolagents.mdx
@@ -0,0 +1,238 @@
+---
+title: Smolagents
+description: "Track and analyze your Smolagents AI agents with AgentOps"
+---
+
+AgentOps provides seamless integration with [Smolagents](https://github.com/huggingface/smolagents), HuggingFace's lightweight framework for building AI agents. Monitor your agent workflows, tool usage, and execution traces automatically.
+
+## Core Concepts
+
+Smolagents is designed around several key concepts:
+
+- **Agents**: AI assistants that can use tools and reason through problems
+- **Tools**: Functions that agents can call to interact with external systems
+- **Models**: LLM backends that power agent reasoning (supports various providers via LiteLLM)
+- **Code Execution**: Agents can write and execute Python code in sandboxed environments
+- **Multi-Agent Systems**: Orchestrate multiple specialized agents working together
+
+## Installation
+
+Install AgentOps and Smolagents, along with any additional dependencies:
+
+
+ ```bash pip
+ pip install agentops smolagents python-dotenv
+ ```
+ ```bash poetry
+ poetry add agentops smolagents python-dotenv
+ ```
+ ```bash uv
+ uv add agentops smolagents python-dotenv
+ ```
+
+
+## Setting Up API Keys
+
+Before using Smolagents with AgentOps, you need to set up your API keys:
+- **AGENTOPS_API_KEY**: From your [AgentOps Dashboard](https://app.agentops.ai/)
+- **LLM API Keys**: Depending on your chosen model provider (e.g., OPENAI_API_KEY, ANTHROPIC_API_KEY)
+
+Set these as environment variables or in a `.env` file.
+
+
+ ```bash Export to CLI
+ export AGENTOPS_API_KEY="your_agentops_api_key_here"
+ export OPENAI_API_KEY="your_openai_api_key_here"
+ ```
+ ```txt Set in .env file
+ AGENTOPS_API_KEY="your_agentops_api_key_here"
+ OPENAI_API_KEY="your_openai_api_key_here"
+ ```
+
+
+Then load them in your Python code:
+```python
+from dotenv import load_dotenv
+import os
+
+load_dotenv()
+
+AGENTOPS_API_KEY = os.getenv("AGENTOPS_API_KEY")
+OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+```
+
+## Usage
+
+Initialize AgentOps before creating your Smolagents to automatically track all agent interactions:
+
+```python
+import agentops
+from smolagents import LiteLLMModel, ToolCallingAgent, DuckDuckGoSearchTool
+
+# Initialize AgentOps
+agentops.init()
+
+# Create a model (supports various providers via LiteLLM)
+model = LiteLLMModel("openai/gpt-4o-mini")
+
+# Create an agent with tools
+agent = ToolCallingAgent(
+ tools=[DuckDuckGoSearchTool()],
+ model=model,
+)
+
+# Run the agent
+result = agent.run("What are the latest developments in AI safety research?")
+print(result)
+```
+
+## Examples
+
+
+```python Simple Math Agent
+import agentops
+from smolagents import LiteLLMModel, CodeAgent
+
+# Initialize AgentOps
+agentops.init()
+
+# Create a model
+model = LiteLLMModel("openai/gpt-4o-mini")
+
+# Create a code agent that can perform calculations
+agent = CodeAgent(
+ tools=[], # No external tools needed for math
+ model=model,
+ additional_authorized_imports=["math", "numpy"],
+)
+
+# Ask the agent to solve a math problem
+result = agent.run(
+ "Calculate the compound interest on $10,000 invested at 5% annual rate "
+ "for 10 years, compounded monthly. Show your work."
+)
+
+print(result)
+```
+
+```python Research Agent with Tools
+import agentops
+from smolagents import (
+ LiteLLMModel,
+ ToolCallingAgent,
+ DuckDuckGoSearchTool,
+ tool
+)
+
+# Initialize AgentOps
+agentops.init()
+
+# Create a custom tool
+@tool
+def word_counter(text: str) -> str:
+ """
+ Counts the number of words in a given text.
+
+ Args:
+ text: The text to count words in.
+
+ Returns:
+ A string with the word count.
+ """
+ word_count = len(text.split())
+ return f"The text contains {word_count} words."
+
+# Create model and agent
+model = LiteLLMModel("openai/gpt-4o-mini")
+
+agent = ToolCallingAgent(
+ tools=[DuckDuckGoSearchTool(), word_counter],
+ model=model,
+)
+
+# Run a research task
+result = agent.run(
+ "Search for information about the James Webb Space Telescope's latest discoveries. "
+ "Then count how many words are in your summary."
+)
+
+print(result)
+```
+
+```python Multi-Step Task Agent
+import agentops
+from smolagents import LiteLLMModel, CodeAgent, tool
+import json
+
+# Initialize AgentOps
+agentops.init()
+
+# Create tools for data processing
+@tool
+def save_json(data: dict, filename: str) -> str:
+ """
+ Saves data to a JSON file.
+
+ Args:
+ data: Dictionary to save
+ filename: Name of the file to save to
+
+ Returns:
+ Success message
+ """
+ with open(filename, 'w') as f:
+ json.dump(data, f, indent=2)
+    return f"Data saved to {filename}"
+
+@tool
+def load_json(filename: str) -> dict:
+ """
+ Loads data from a JSON file.
+
+ Args:
+ filename: Name of the file to load from
+
+ Returns:
+ The loaded data as a dictionary
+ """
+ with open(filename, 'r') as f:
+ return json.load(f)
+
+# Create agent
+model = LiteLLMModel("openai/gpt-4o-mini")
+
+agent = CodeAgent(
+ tools=[save_json, load_json],
+ model=model,
+ additional_authorized_imports=["pandas", "datetime"],
+)
+
+# Run a multi-step data processing task
+result = agent.run("""
+1. Create a dataset of 5 fictional employees with names, departments, and salaries
+2. Save this data to 'employees.json'
+3. Load the data back and calculate the average salary
+4. Find the highest paid employee
+5. Return a summary of your findings
+""")
+
+print(result)
+```
+
+## More Examples
+
+
+
+ Complex multi-agent web browsing system
+
+
+ Convert natural language queries to SQL
+
+
+
+Visit your [AgentOps Dashboard](https://app.agentops.ai) to see detailed traces of your Smolagents executions, tool usage, and agent reasoning steps.
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/v2/introduction.mdx b/docs/v2/introduction.mdx
index 02c252328..1accbb705 100644
--- a/docs/v2/introduction.mdx
+++ b/docs/v2/introduction.mdx
@@ -35,6 +35,7 @@ description: "AgentOps is the developer favorite platform for testing, debugging
} iconType="image" href="/v2/integrations/google_adk" />
} iconType="image" href="/v2/integrations/langchain" />
} iconType="image" href="/v2/integrations/agents_sdk" />
+ } iconType="image" href="/v2/integrations/smolagents" />
Observability and monitoring for your AI agents and LLM apps. And we do it all in just two lines of code...
diff --git a/examples/smolagents/README.md b/examples/smolagents/README.md
index bfe99cf30..851997148 100644
--- a/examples/smolagents/README.md
+++ b/examples/smolagents/README.md
@@ -1,32 +1,56 @@
-# SmolAgents Examples with AgentOps
+# Smolagents Examples
-This directory contains examples of using SmolAgents with AgentOps instrumentation.
+This directory contains examples demonstrating how to use Smolagents with AgentOps for agent monitoring and observability.
-## Prerequisites
+## Examples
-- Python >= 3.10 < 3.13
-- Install required dependencies:
- ```
- pip install agentops smolagents
- ```
+### 1. Simple Task Agent (`simple_task_agent.py`)
+A minimal example showing how to create a single agent that can answer questions using web search. This is the best starting point for understanding Smolagents basics.
-## Examples
+**Features:**
+- Basic agent setup with search capabilities
+- AgentOps integration for tracking
+- Error handling and session management
+
+### 2. Multi-Agent System (`multi_smolagents_system.py`)
+A more complex example demonstrating a hierarchical multi-agent system with:
+- Manager agent coordinating multiple specialized agents
+- Web search agent with custom tools
+- Code interpreter capabilities
+- Tool creation and usage
+
+### 3. Text to SQL Agent (`text_to_sql.py`)
+An example showing how to create an agent that can convert natural language queries into SQL statements.
+
+## Running the Examples
-### 1. Multi-Agent System
+1. Install dependencies:
+```bash
+pip install agentops smolagents python-dotenv
+```
-Example: `multi_smolagents_system`
-This example demonstrates:
-- Multi-agent system coordination
+2. Set up your API keys in a `.env` file:
+```env
+AGENTOPS_API_KEY=your_agentops_api_key_here
+OPENAI_API_KEY=your_openai_api_key_here
+```
-### 2. Text to SQL
+3. Run any example:
+```bash
+python simple_task_agent.py
+```
-Example: `text_to_sql`
+4. View the results in your [AgentOps Dashboard](https://app.agentops.ai/sessions)
-This example demonstrates:
-- Natural language to SQL conversion
+## Key Concepts
-## AgentOps Integration
+- **Agents**: AI assistants that can use tools and reason through problems
+- **Tools**: Functions that agents can call to interact with external systems
+- **Models**: LLM backends via LiteLLM (supports OpenAI, Anthropic, etc.)
+- **AgentOps Integration**: Automatic tracking of all agent activities
-These examples show how to use AgentOps to monitor and analyze your AI applications. AgentOps automatically instruments your SmolAgents calls to provide insights into performance, usage patterns, and model behavior.
+## Learn More
-To learn more about AgentOps, visit [https://www.agentops.ai](https://www.agentops.ai)
+- [Smolagents Documentation](https://github.com/huggingface/smolagents)
+- [AgentOps Documentation](https://docs.agentops.ai)
+- [Full Integration Guide](https://docs.agentops.ai/v2/integrations/smolagents)
diff --git a/examples/smolagents/multi_smolagents_system.ipynb b/examples/smolagents/multi_smolagents_system.ipynb
index 17d0bdd48..9a97f154e 100644
--- a/examples/smolagents/multi_smolagents_system.ipynb
+++ b/examples/smolagents/multi_smolagents_system.ipynb
@@ -34,7 +34,7 @@
" | Visit webpage tool |\n",
" +--------------------------------+\n",
"```\n",
- "Let\u2019s set up this system.\n",
+ "Let’s set up this system.\n",
"\n",
"Run the line below to install the required dependencies:"
]
@@ -57,32 +57,51 @@
"id": "00509499",
"metadata": {},
"source": [
- "\ud83d\udd87\ufe0f Now we initialize the AgentOps client and load the environment variables to use the API keys."
+ "🖇️ Now we initialize the AgentOps client and load the environment variables to use the API keys."
]
},
{
"cell_type": "code",
+ "execution_count": null,
"id": "330770fd",
"metadata": {},
"outputs": [],
- "source": "import agentops\nfrom dotenv import load_dotenv\nimport os\nimport re\nimport requests\nfrom markdownify import markdownify\nfrom requests.exceptions import RequestException\nfrom smolagents import tool\nfrom smolagents import LiteLLMModel\nfrom smolagents import (\n CodeAgent,\n ToolCallingAgent,\n ManagedAgent,\n DuckDuckGoSearchTool,\n)\n\nload_dotenv()\nos.environ[\"AGENTOPS_API_KEY\"] = os.getenv(\"AGENTOPS_API_KEY\", \"your_api_key_here\")\nos.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\", \"your_openai_api_key_here\")",
- "execution_count": null
+ "source": [
+ "import agentops\n",
+ "from dotenv import load_dotenv\n",
+ "import os\n",
+ "import re\n",
+ "import requests\n",
+ "from markdownify import markdownify\n",
+ "from requests.exceptions import RequestException\n",
+ "\n",
+ "load_dotenv()\n",
+ "os.environ[\"AGENTOPS_API_KEY\"] = os.getenv(\"AGENTOPS_API_KEY\", \"your_api_key_here\")\n",
+ "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\", \"your_openai_api_key_here\")"
+ ]
},
{
"cell_type": "markdown",
"id": "9516d2a7",
"metadata": {},
"source": [
- "\u26a1\ufe0f Our agent will be powered by `openai/gpt-4o-mini` using the `LiteLLMModel` class."
+ "⚡️ Our agent will be powered by `openai/gpt-4o-mini` using the `LiteLLMModel` class."
]
},
{
"cell_type": "code",
+ "execution_count": null,
"id": "5f78927c",
"metadata": {},
"outputs": [],
- "source": "agentops.init(auto_start_session=False)\ntracer = agentops.start_trace(\n trace_name=\"Orchestrate a Multi-Agent System\", tags=[\"smolagents\", \"example\", \"multi-agent\", \"agentops-example\"]\n)\nmodel = LiteLLMModel(\"openai/gpt-4o-mini\")",
- "execution_count": null
+ "source": [
+ "agentops.init(auto_start_session=False)\n",
+ "tracer = agentops.start_trace(\n",
+ " trace_name=\"Orchestrate a Multi-Agent System\", tags=[\"smolagents\", \"example\", \"multi-agent\", \"agentops-example\"]\n",
+ ")\n",
+    "from smolagents import LiteLLMModel, tool, CodeAgent, ToolCallingAgent, DuckDuckGoSearchTool\n",
+ "model = LiteLLMModel(\"openai/gpt-4o-mini\")"
+ ]
},
{
"cell_type": "markdown",
@@ -91,23 +110,51 @@
"source": [
"## Create a Web Search Tool\n",
"\n",
- "For web browsing, we can already use our pre-existing `DuckDuckGoSearchTool`. However, we will also create a `VisitWebpageTool` from scratch using `markdownify`. Here\u2019s how:"
+ "For web browsing, we can already use our pre-existing `DuckDuckGoSearchTool`. However, we will also create a `VisitWebpageTool` from scratch using `markdownify`. Here’s how:"
]
},
{
"cell_type": "code",
+ "execution_count": null,
"id": "01689447",
"metadata": {},
"outputs": [],
- "source": "@tool\ndef visit_webpage(url: str) -> str:\n \"\"\"Visits a webpage at the given URL and returns its content as a markdown string.\n\n Args:\n url: The URL of the webpage to visit.\n\n Returns:\n The content of the webpage converted to Markdown, or an error message if the request fails.\n \"\"\"\n try:\n # Send a GET request to the URL\n response = requests.get(url)\n response.raise_for_status() # Raise an exception for bad status codes\n\n # Convert the HTML content to Markdown\n markdown_content = markdownify(response.text).strip()\n\n # Remove multiple line breaks\n markdown_content = re.sub(r\"\\n{3,}\", \"\\n\\n\", markdown_content)\n\n return markdown_content\n\n except RequestException as e:\n return f\"Error fetching the webpage: {str(e)}\"\n except Exception as e:\n return f\"An unexpected error occurred: {str(e)}\"",
- "execution_count": null
+ "source": [
+ "@tool\n",
+ "def visit_webpage(url: str) -> str:\n",
+ " \"\"\"Visits a webpage at the given URL and returns its content as a markdown string.\n",
+ "\n",
+ " Args:\n",
+ " url: The URL of the webpage to visit.\n",
+ "\n",
+ " Returns:\n",
+ " The content of the webpage converted to Markdown, or an error message if the request fails.\n",
+ " \"\"\"\n",
+ " try:\n",
+ " # Send a GET request to the URL\n",
+ " response = requests.get(url)\n",
+ " response.raise_for_status() # Raise an exception for bad status codes\n",
+ "\n",
+ " # Convert the HTML content to Markdown\n",
+ " markdown_content = markdownify(response.text).strip()\n",
+ "\n",
+ " # Remove multiple line breaks\n",
+ " markdown_content = re.sub(r\"\\n{3,}\", \"\\n\\n\", markdown_content)\n",
+ "\n",
+ " return markdown_content\n",
+ "\n",
+ " except RequestException as e:\n",
+ " return f\"Error fetching the webpage: {str(e)}\"\n",
+ " except Exception as e:\n",
+ " return f\"An unexpected error occurred: {str(e)}\""
+ ]
},
{
"cell_type": "markdown",
"id": "3c45517b",
"metadata": {},
"source": [
- "Let\u2019s test our tool:"
+ "Let’s test our tool:"
]
},
{
@@ -132,18 +179,32 @@
},
{
"cell_type": "code",
+ "execution_count": null,
"id": "f274b34f",
"metadata": {},
"outputs": [],
- "source": "web_agent = ToolCallingAgent(\n tools=[DuckDuckGoSearchTool(), visit_webpage],\n model=model,\n max_iterations=10,\n)\n\nmanaged_web_agent = ManagedAgent(\n agent=web_agent,\n name=\"search\",\n description=\"Runs web searches for you. Give it your query as an argument.\",\n)\n\nmanager_agent = CodeAgent(\n tools=[],\n model=model,\n managed_agents=[managed_web_agent],\n additional_authorized_imports=[\"time\", \"numpy\", \"pandas\"],\n)",
- "execution_count": null
+ "source": [
+ "web_agent = ToolCallingAgent(\n",
+ " tools=[DuckDuckGoSearchTool(), visit_webpage],\n",
+ " model=model,\n",
+ " name=\"search\",\n",
+ " description=\"Runs web searches for you. Give it your query as an argument.\",\n",
+ ")\n",
+ "\n",
+ "manager_agent = CodeAgent(\n",
+ " tools=[],\n",
+ " model=model,\n",
+ " managed_agents=[web_agent],\n",
+ " additional_authorized_imports=[\"time\", \"numpy\", \"pandas\"],\n",
+ ")"
+ ]
},
{
"cell_type": "markdown",
"id": "d5977883",
"metadata": {},
"source": [
- "Let\u2019s run our system with the following query:"
+ "Let’s run our system with the following query:"
]
},
{
@@ -189,7 +250,7 @@
],
"metadata": {
"kernelspec": {
- "display_name": "test",
+ "display_name": ".venv",
"language": "python",
"name": "python3"
},
@@ -203,9 +264,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.12.8"
+ "version": "3.11.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
-}
\ No newline at end of file
+}
diff --git a/examples/smolagents/multi_smolagents_system.py b/examples/smolagents/multi_smolagents_system.py
index b7d0319de..c14740b30 100644
--- a/examples/smolagents/multi_smolagents_system.py
+++ b/examples/smolagents/multi_smolagents_system.py
@@ -40,7 +40,6 @@
from smolagents import (
CodeAgent,
ToolCallingAgent,
- ManagedAgent,
DuckDuckGoSearchTool,
)
@@ -96,19 +95,12 @@ def visit_webpage(url: str) -> str:
web_agent = ToolCallingAgent(
tools=[DuckDuckGoSearchTool(), visit_webpage],
model=model,
- max_iterations=10,
-)
-
-managed_web_agent = ManagedAgent(
- agent=web_agent,
- name="search",
- description="Runs web searches for you. Give it your query as an argument.",
)
manager_agent = CodeAgent(
tools=[],
model=model,
- managed_agents=[managed_web_agent],
+ managed_agents=[web_agent],
additional_authorized_imports=["time", "numpy", "pandas"],
)
# Let’s run our system with the following query: