From 8e069b6f8e6f362a33b53bb4f2aab4f2256078b5 Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Wed, 18 Jun 2025 04:50:18 +0530 Subject: [PATCH 1/8] Refactor instrumentation modules for improved structure and consistency --- agentops/instrumentation/README.md | 159 +++- agentops/instrumentation/ag2/instrumentor.py | 676 +++++++-------- agentops/instrumentation/agno/instrumentor.py | 387 ++++----- .../instrumentation/anthropic/instrumentor.py | 179 ++-- agentops/instrumentation/common/__init__.py | 75 +- .../instrumentation/common/instrumentor.py | 148 ++++ agentops/instrumentation/common/metrics.py | 100 +++ .../instrumentation/common/span_management.py | 174 ++++ agentops/instrumentation/common/streaming.py | 218 +++++ .../instrumentation/common/token_counting.py | 173 ++++ .../concurrent_futures/instrumentation.py | 70 +- .../instrumentation/crewai/instrumentation.py | 796 ++++++++---------- .../google_adk/instrumentor.py | 74 +- .../google_genai/instrumentor.py | 81 +- .../ibm_watsonx_ai/instrumentor.py | 101 ++- agentops/instrumentation/mem0/instrumentor.py | 68 +- .../instrumentation/openai/instrumentor.py | 168 ++-- .../openai_agents/attributes/common.py | 2 +- .../smolagents/instrumentor.py | 87 +- agentops/semconv/README.md | 177 ++-- agentops/semconv/workflow.py | 59 +- .../anthropic/test_instrumentor.py | 98 ++- tests/unit/instrumentation/mock_span.py | 4 +- .../openai_agents/test_openai_agents.py | 12 +- .../openai_core/test_instrumentor.py | 67 +- 25 files changed, 2569 insertions(+), 1584 deletions(-) create mode 100644 agentops/instrumentation/common/instrumentor.py create mode 100644 agentops/instrumentation/common/metrics.py create mode 100644 agentops/instrumentation/common/span_management.py create mode 100644 agentops/instrumentation/common/streaming.py create mode 100644 agentops/instrumentation/common/token_counting.py diff --git a/agentops/instrumentation/README.md b/agentops/instrumentation/README.md index d6fea178b..a97b4649c 100644 --- a/agentops/instrumentation/README.md +++ b/agentops/instrumentation/README.md @@ -4,29 +4,164 @@ This package provides OpenTelemetry instrumentation for various LLM providers an ## Available Instrumentors -- OpenAI (`v0.27.0+` and `v1.0.0+`) +- **OpenAI** (`v0.27.0+` and `v1.0.0+`) +- **Anthropic** (`v0.7.0+`) +- **Google GenAI** (`v0.1.0+`) +- **IBM WatsonX AI** (`v0.1.0+`) +- **CrewAI** (`v0.56.0+`) +- **AG2/AutoGen** (`v0.3.2+`) +- **Google ADK** (`v0.1.0+`) +- **Agno** (`v0.0.1+`) +- **Mem0** (`v0.1.0+`) +- **SmolAgents** (`v0.1.0+`) +## Common Module Usage -## Usage +The `agentops.instrumentation.common` module provides shared utilities for creating instrumentations: -### OpenAI Instrumentation +### Base Instrumentor + +Use `BaseAgentOpsInstrumentor` for creating new instrumentations: + +```python +from agentops.instrumentation.common import BaseAgentOpsInstrumentor, InstrumentorConfig, WrapConfig + +class MyInstrumentor(BaseAgentOpsInstrumentor): + def __init__(self): + config = InstrumentorConfig( + library_name="my-library", + library_version="1.0.0", + wrapped_methods=[ + WrapConfig( + trace_name="my.method", + package="my_library.module", + class_name="MyClass", + method_name="my_method", + handler=my_attribute_handler + ) + ], + dependencies=["my-library >= 1.0.0"] + ) + super().__init__(config) +``` + +### Attribute Handlers + +Create attribute handlers to extract data from method calls: + +```python +from agentops.instrumentation.common import AttributeMap + +def my_attribute_handler(args=None, kwargs=None, 
return_value=None) -> AttributeMap: + attributes = {} + + if kwargs and "model" in kwargs: + attributes["llm.request.model"] = kwargs["model"] + + if return_value and hasattr(return_value, "usage"): + attributes["llm.usage.total_tokens"] = return_value.usage.total_tokens + + return attributes +``` + +### Span Management + +Use the span management utilities for consistent span creation: + +```python +from agentops.instrumentation.common import create_span, SpanAttributeManager + +# Create an attribute manager +attr_manager = SpanAttributeManager(service_name="my-service") + +# Use the create_span context manager +with create_span( + tracer, + "my.operation", + attributes={"my.attribute": "value"}, + attribute_manager=attr_manager +) as span: + # Your operation code here + pass +``` + +### Token Counting + +Use the token counting utilities for consistent token usage extraction: ```python -from opentelemetry.instrumentation.openai import OpenAIInstrumentor +from agentops.instrumentation.common import TokenUsageExtractor, set_token_usage_attributes + +# Extract token usage from a response +usage = TokenUsageExtractor.extract_from_response(response) + +# Set token usage attributes on a span +set_token_usage_attributes(span, response) +``` + +### Streaming Support -from agentops.telemetry import get_tracer_provider() +Use streaming utilities for handling streaming responses: -# Initialize and instrument -instrumentor = OpenAIInstrumentor( - enrich_assistant=True, # Include assistant messages in spans - enrich_token_usage=True, # Include token usage in spans - enable_trace_context_propagation=True, # Enable trace context propagation +```python +from agentops.instrumentation.common import create_stream_wrapper_factory, StreamingResponseHandler + +# Create a stream wrapper factory +wrapper = create_stream_wrapper_factory( + tracer, + "my.stream", + extract_chunk_content=StreamingResponseHandler.extract_generic_chunk_content, + initial_attributes={"stream.type": "text"} ) -instrumentor.instrument(tracer_provider=tracer_provider) # <-- Uses the global AgentOps TracerProvider + +# Apply to streaming methods +wrap_function_wrapper("my_module", "stream_method", wrapper) ``` +### Metrics + +Use standard metrics for consistency across instrumentations: + +```python +from agentops.instrumentation.common import StandardMetrics, MetricsRecorder + +# Create standard metrics +metrics = StandardMetrics.create_standard_metrics(meter) + +# Use the metrics recorder +recorder = MetricsRecorder(metrics) +recorder.record_token_usage(prompt_tokens=100, completion_tokens=50) +recorder.record_duration(1.5) +``` + +## Creating a New Instrumentor + +1. Create a new directory under `agentops/instrumentation/` for your provider +2. Create an `__init__.py` file with version information +3. Create an `instrumentor.py` file extending `BaseAgentOpsInstrumentor` +4. Create attribute handlers in an `attributes/` subdirectory +5. Add your instrumentor to the main `__init__.py` configuration + +Example structure: +``` +agentops/instrumentation/ +├── my_provider/ +│ ├── __init__.py +│ ├── instrumentor.py +│ └── attributes/ +│ ├── __init__.py +│ └── handlers.py +``` -> To add custom instrumentation, please do so in the `third_party/opentelemetry` directory. +## Best Practices +1. **Use Common Utilities**: Leverage the common module for consistency +2. **Follow Semantic Conventions**: Use attributes from `agentops.semconv` +3. **Handle Errors Gracefully**: Wrap operations in try-except blocks +4. 
**Support Async**: Provide both sync and async method wrapping +5. **Document Attributes**: Comment on what attributes are captured +6. **Test Thoroughly**: Write unit tests for your instrumentor +## Examples +See the `examples/` directory for usage examples of each instrumentor. diff --git a/agentops/instrumentation/ag2/instrumentor.py b/agentops/instrumentation/ag2/instrumentor.py index 661e7415f..ff2cdd6fe 100644 --- a/agentops/instrumentation/ag2/instrumentor.py +++ b/agentops/instrumentation/ag2/instrumentor.py @@ -5,16 +5,22 @@ """ import json -from typing import Collection - -from opentelemetry.instrumentation.instrumentor import BaseInstrumentor -from opentelemetry.trace import get_tracer, SpanKind, Status, StatusCode -from opentelemetry.metrics import get_meter +from typing import Dict, Any from wrapt import wrap_function_wrapper +from opentelemetry.trace import SpanKind +from opentelemetry.metrics import Meter +from opentelemetry.instrumentation.utils import unwrap as otel_unwrap + from agentops.logging import logger +from agentops.instrumentation.common import ( + BaseAgentOpsInstrumentor, + InstrumentorConfig, + StandardMetrics, + create_span, + SpanAttributeManager, +) from agentops.instrumentation.ag2 import LIBRARY_NAME, LIBRARY_VERSION -from agentops.semconv import Meters from agentops.semconv.message import MessageAttributes from agentops.semconv.span_attributes import SpanAttributes from agentops.semconv.agent import AgentAttributes @@ -22,7 +28,7 @@ from agentops.semconv.tool import ToolAttributes -class AG2Instrumentor(BaseInstrumentor): +class AG2Instrumentor(BaseAgentOpsInstrumentor): """Instrumentor for AG2 (AutoGen) This instrumentor captures high-level events from AG2's agent interactions, @@ -30,34 +36,27 @@ class AG2Instrumentor(BaseInstrumentor): tool usage information. 
""" - def instrumentation_dependencies(self) -> Collection[str]: - """Return packages required for instrumentation.""" - return ["ag2 >= 0.3.2"] - - def _instrument(self, **kwargs): - """Instrument AG2 components.""" - tracer_provider = kwargs.get("tracer_provider") - tracer = get_tracer(LIBRARY_NAME, LIBRARY_VERSION, tracer_provider) - - meter_provider = kwargs.get("meter_provider") - meter = get_meter(LIBRARY_NAME, LIBRARY_VERSION, meter_provider) - - # Create metrics - meter.create_histogram( - name=Meters.LLM_OPERATION_DURATION, - unit="s", - description="AG2 operation duration", + def __init__(self): + config = InstrumentorConfig( + library_name=LIBRARY_NAME, + library_version=LIBRARY_VERSION, + wrapped_methods=[], # We'll use custom wrapping + metrics_enabled=True, + dependencies=["ag2 >= 0.3.2"], ) + super().__init__(config) + self._attribute_manager = None - meter.create_counter( - name=Meters.LLM_COMPLETIONS_EXCEPTIONS, - unit="time", - description="Exceptions in AG2 operations", - ) + def _create_metrics(self, meter: Meter) -> Dict[str, Any]: + """Create metrics for AG2 instrumentation.""" + return StandardMetrics.create_standard_metrics(meter) - self._wrap_methods(tracer) + def _initialize(self, **kwargs): + """Initialize attribute manager.""" + self._attribute_manager = SpanAttributeManager(service_name="agentops", deployment_environment="production") - def _wrap_methods(self, tracer): + def _custom_wrap(self, **kwargs): + """Perform custom wrapping for AG2 methods.""" methods_to_wrap = [ ("autogen.agentchat.conversable_agent", "ConversableAgent.__init__", self._agent_init_wrapper), ("autogen.agentchat.conversable_agent", "ConversableAgent.run", self._agent_run_wrapper), @@ -78,15 +77,13 @@ def _wrap_methods(self, tracer): for module, method, wrapper_factory in methods_to_wrap: try: - wrap_function_wrapper(module, method, wrapper_factory(tracer)) + wrap_function_wrapper(module, method, wrapper_factory(self._tracer)) logger.debug(f"Successfully wrapped {method}") except (AttributeError, ModuleNotFoundError) as e: logger.debug(f"Failed to wrap {method}: {e}") - def _uninstrument(self, **kwargs): + def _custom_unwrap(self, **kwargs): """Remove instrumentation from AG2.""" - from opentelemetry.instrumentation.utils import unwrap as otel_unwrap - # Unwrap all instrumented methods methods_to_unwrap = [ ("autogen.agentchat.conversable_agent", "ConversableAgent.__init__"), @@ -158,129 +155,30 @@ def wrapper(wrapped, instance, args, kwargs): span_name = f"ag2.chat.{initiator_name}_to_{recipient_name}" - with tracer.start_as_current_span(span_name, kind=SpanKind.INTERNAL) as span: - try: - span.set_attribute(AgentAttributes.FROM_AGENT, initiator_name) - span.set_attribute(AgentAttributes.TO_AGENT, recipient_name) - span.set_attribute("ag2.chat.type", "individual") - span.set_attribute("ag2.chat.initiator", initiator_name) - span.set_attribute("ag2.chat.recipient", recipient_name) - - # Extract system message from both agents - initiator_system_msg = getattr(instance, "system_message", "") - if initiator_system_msg: - initiator_system_msg = ( - "" - if initiator_system_msg is None - else str(initiator_system_msg) - if not isinstance(initiator_system_msg, str) - else initiator_system_msg - ) - span.set_attribute("ag2.initiator.system_message", initiator_system_msg) - - recipient_system_msg = getattr(recipient_agent, "system_message", "") - if recipient_system_msg: - recipient_system_msg = ( - "" - if recipient_system_msg is None - else str(recipient_system_msg) - if not 
isinstance(recipient_system_msg, str) - else recipient_system_msg - ) - span.set_attribute(SpanAttributes.LLM_REQUEST_SYSTEM_INSTRUCTION, recipient_system_msg) - - # Extract LLM config from both agents - initiator_llm_config = getattr(instance, "llm_config", {}) - if isinstance(initiator_llm_config, dict) and initiator_llm_config: - model = initiator_llm_config.get("model", "unknown") - span.set_attribute("ag2.initiator.model", model) - - recipient_llm_config = getattr(recipient_agent, "llm_config", {}) - self._set_llm_config_attributes(span, recipient_llm_config) - - # Extract initial message - initial_message = kwargs.get("message", "") - if initial_message: - initial_message = ( - "" - if initial_message is None - else str(initial_message) - if not isinstance(initial_message, str) - else initial_message - ) - span.set_attribute("ag2.chat.initial_message", initial_message) - - result = wrapped(*args, **kwargs) - - # Extract chat history from both agents after completion - try: - # Get initiator chat history - initiator_chat_history = getattr(instance, "chat_history", []) - if initiator_chat_history: - span.set_attribute("ag2.initiator.message_count", len(initiator_chat_history)) - - # Get recipient chat history - recipient_chat_history = getattr(recipient_agent, "chat_history", []) - if recipient_chat_history: - message_count = len(recipient_chat_history) - span.set_attribute("ag2.conversation.message_count", message_count) - - # Record sample of conversation messages - if message_count > 0: - # First message - first_msg = recipient_chat_history[0] - if isinstance(first_msg, dict): - role = first_msg.get("role", "unknown") - content = first_msg.get("content", "") - name = first_msg.get("name", "unknown") - - span.set_attribute("messaging.prompt.role.0", role) - content = ( - "" - if content is None - else str(content) - if not isinstance(content, str) - else content - ) - span.set_attribute("messaging.prompt.content.0", content) - span.set_attribute("messaging.prompt.speaker.0", name) - - # Last message - last_msg = recipient_chat_history[-1] - if isinstance(last_msg, dict): - role = last_msg.get("role", "unknown") - content = last_msg.get("content", "") - name = last_msg.get("name", "unknown") - - span.set_attribute("messaging.completion.role.0", role) - content = ( - "" - if content is None - else str(content) - if not isinstance(content, str) - else content - ) - span.set_attribute("messaging.completion.content.0", content) - span.set_attribute("messaging.completion.speaker.0", name) - - # Check for tool usage - span.set_attribute("ag2.chat.used_tools", "tool_calls" in last_msg) - - # Capture metadata - if "metadata" in last_msg and isinstance(last_msg["metadata"], dict): - meta = last_msg["metadata"] - if "model" in meta: - span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, meta["model"]) - except Exception as e: - logger.debug(f"Could not extract chat history: {e}") - - span.set_status(Status(StatusCode.OK)) - return result - except Exception as e: - span.set_status(Status(StatusCode.ERROR, str(e))) - span.record_exception(e) - logger.error(f"Error in initiate_chat instrumentation: {e}") - return wrapped(*args, **kwargs) + with create_span( + tracer, span_name, kind=SpanKind.INTERNAL, attribute_manager=self._attribute_manager + ) as span: + span.set_attribute(AgentAttributes.FROM_AGENT, initiator_name) + span.set_attribute(AgentAttributes.TO_AGENT, recipient_name) + span.set_attribute("ag2.chat.type", "individual") + span.set_attribute("ag2.chat.initiator", initiator_name) + 
span.set_attribute("ag2.chat.recipient", recipient_name) + + # Extract system messages and LLM configs + self._extract_agent_attributes(span, instance, recipient_agent) + + # Extract initial message + initial_message = kwargs.get("message", "") + if initial_message: + initial_message = self._safe_str(initial_message) + span.set_attribute("ag2.chat.initial_message", initial_message) + + result = wrapped(*args, **kwargs) + + # Extract chat history after completion + self._extract_chat_history(span, instance, recipient_agent) + + return result return wrapper @@ -292,54 +190,39 @@ def wrapper(wrapped, instance, args, kwargs): agent_type = getattr(instance, "_agentops_metadata", {}).get("type", "ConversableAgent") span_name = f"ag2.agent.{agent_name}.run" - with tracer.start_as_current_span(span_name, kind=SpanKind.INTERNAL) as span: - try: - model = getattr(instance, "_agentops_metadata", {}).get("model", "unknown") - - span.set_attribute(AgentAttributes.AGENT_NAME, agent_name) - span.set_attribute(AgentAttributes.AGENT_ROLE, agent_type) - span.set_attribute(SpanAttributes.LLM_REQUEST_MODEL, model) - - llm_config = getattr(instance, "llm_config", None) - self._set_llm_config_attributes(span, llm_config) - - # Capture input message if available - message = kwargs.get("message", "") - if message: - content_to_set = "" - if isinstance(message, dict): - content = message.get("content", "") - content_to_set = ( - "" if content is None else str(content) if not isinstance(content, str) else content - ) - elif isinstance(message, str): - content_to_set = message - else: - content_to_set = str(message) - - span.set_attribute("ag2.run.input_message", content_to_set) - - # Initialize completions and prompts count - span.set_attribute(SpanAttributes.LLM_COMPLETIONS, 0) - span.set_attribute(SpanAttributes.LLM_PROMPTS, 0) - - response = wrapped(*args, **kwargs) - - if hasattr(response, "chat_history"): - self._capture_conversation_summary(span, instance, response) - elif hasattr(response, "get") and callable(response.get): - model_info = response.get("model", "") - if model_info: - span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, model_info) - - span.set_attribute(WorkflowAttributes.WORKFLOW_STEP_STATUS, "completed") - span.set_status(Status(StatusCode.OK)) - return response - except Exception as e: - span.set_status(Status(StatusCode.ERROR, str(e))) - span.record_exception(e) - logger.error(f"Error in agent run instrumentation: {e}") - return wrapped(*args, **kwargs) + with create_span( + tracer, span_name, kind=SpanKind.INTERNAL, attribute_manager=self._attribute_manager + ) as span: + model = getattr(instance, "_agentops_metadata", {}).get("model", "unknown") + + span.set_attribute(AgentAttributes.AGENT_NAME, agent_name) + span.set_attribute(AgentAttributes.AGENT_ROLE, agent_type) + span.set_attribute(SpanAttributes.LLM_REQUEST_MODEL, model) + + llm_config = getattr(instance, "llm_config", None) + self._set_llm_config_attributes(span, llm_config) + + # Capture input message if available + message = kwargs.get("message", "") + if message: + content_to_set = self._extract_message_content(message) + span.set_attribute("ag2.run.input_message", content_to_set) + + # Initialize completions and prompts count + span.set_attribute(SpanAttributes.LLM_COMPLETIONS, 0) + span.set_attribute(SpanAttributes.LLM_PROMPTS, 0) + + response = wrapped(*args, **kwargs) + + if hasattr(response, "chat_history"): + self._capture_conversation_summary(span, instance, response) + elif hasattr(response, "get") and 
callable(response.get): + model_info = response.get("model", "") + if model_info: + span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, model_info) + + span.set_attribute(WorkflowAttributes.WORKFLOW_STEP_STATUS, "completed") + return response return wrapper @@ -347,43 +230,28 @@ def _group_chat_run_wrapper(self, tracer): """Wrapper for capturing group chat execution.""" def wrapper(wrapped, instance, args, kwargs): - with tracer.start_as_current_span("ag2.groupchat.run", kind=SpanKind.INTERNAL) as span: - try: - group_chat = getattr(instance, "groupchat", None) - agents = getattr(group_chat, "agents", []) if group_chat else [] - agent_names = [getattr(agent, "name", f"agent_{i}") for i, agent in enumerate(agents)] - - span.set_attribute(AgentAttributes.AGENT_ROLE, "GroupChatManager") - span.set_attribute(AgentAttributes.AGENT_NAME, getattr(instance, "name", "unnamed_manager")) - span.set_attribute("ag2.groupchat.agents", ", ".join(agent_names)) - span.set_attribute("ag2.groupchat.agent_count", len(agents)) - - # Capture input message if available - message = kwargs.get("message", "") - if message: - content_to_set = "" - if isinstance(message, dict): - content = message.get("content", "") - content_to_set = ( - "" if content is None else str(content) if not isinstance(content, str) else content - ) - elif isinstance(message, str): - content_to_set = message - else: - content_to_set = str(message) - - span.set_attribute("ag2.groupchat.input_message", content_to_set) - - result = wrapped(*args, **kwargs) - self._capture_group_chat_summary(span, instance, result) - - span.set_status(Status(StatusCode.OK)) - return result - except Exception as e: - span.set_status(Status(StatusCode.ERROR, str(e))) - span.record_exception(e) - logger.error(f"Error in group chat instrumentation: {e}") - return wrapped(*args, **kwargs) + with create_span( + tracer, "ag2.groupchat.run", kind=SpanKind.INTERNAL, attribute_manager=self._attribute_manager + ) as span: + group_chat = getattr(instance, "groupchat", None) + agents = getattr(group_chat, "agents", []) if group_chat else [] + agent_names = [getattr(agent, "name", f"agent_{i}") for i, agent in enumerate(agents)] + + span.set_attribute(AgentAttributes.AGENT_ROLE, "GroupChatManager") + span.set_attribute(AgentAttributes.AGENT_NAME, getattr(instance, "name", "unnamed_manager")) + span.set_attribute("ag2.groupchat.agents", ", ".join(agent_names)) + span.set_attribute("ag2.groupchat.agent_count", len(agents)) + + # Capture input message if available + message = kwargs.get("message", "") + if message: + content_to_set = self._extract_message_content(message) + span.set_attribute("ag2.groupchat.input_message", content_to_set) + + result = wrapped(*args, **kwargs) + self._capture_group_chat_summary(span, instance, result) + + return result return wrapper @@ -393,70 +261,195 @@ def _tool_execution_wrapper(self, tracer, tool_type): def wrapper(wrapped, instance, args, kwargs): span_name = f"ag2.tool.{tool_type}" - with tracer.start_as_current_span(span_name, kind=SpanKind.INTERNAL) as span: - try: - agent_name = getattr(instance, "name", "unnamed_agent") - span.set_attribute(AgentAttributes.AGENT_NAME, agent_name) - span.set_attribute(ToolAttributes.TOOL_NAME, tool_type) - - if tool_type == "function" and args: - func_call = args[0] - if isinstance(func_call, dict): - span.set_attribute( - MessageAttributes.TOOL_CALL_NAME.format(i=0), func_call.get("name", "unknown") - ) - if "arguments" in func_call: - try: - span.set_attribute( - 
MessageAttributes.TOOL_CALL_ARGUMENTS.format(i=0), - json.dumps(func_call["arguments"]), - ) - except: - pass - - elif tool_type == "code" and args: - code = args[0] - if isinstance(code, str): - span.set_attribute("ag2.tool.code.size", len(code)) - span.set_attribute("ag2.tool.code.language", kwargs.get("lang", "unknown")) - - result = wrapped(*args, **kwargs) - - if tool_type == "function" and isinstance(result, tuple) and len(result) > 0: - success = result[0] if isinstance(result[0], bool) else False - span.set_attribute(ToolAttributes.TOOL_STATUS, "success" if success else "error") - - if len(result) > 1 and isinstance(result[1], dict): + with create_span( + tracer, span_name, kind=SpanKind.INTERNAL, attribute_manager=self._attribute_manager + ) as span: + agent_name = getattr(instance, "name", "unnamed_agent") + span.set_attribute(AgentAttributes.AGENT_NAME, agent_name) + span.set_attribute(ToolAttributes.TOOL_NAME, tool_type) + + if tool_type == "function" and args: + func_call = args[0] + if isinstance(func_call, dict): + span.set_attribute( + MessageAttributes.TOOL_CALL_NAME.format(i=0), func_call.get("name", "unknown") + ) + if "arguments" in func_call: try: - span.set_attribute(ToolAttributes.TOOL_RESULT, json.dumps(result[1])) + span.set_attribute( + MessageAttributes.TOOL_CALL_ARGUMENTS.format(i=0), + json.dumps(func_call["arguments"]), + ) except: pass - if tool_type == "code" and isinstance(result, tuple) and len(result) >= 3: - exit_code = result[0] - span.set_attribute("exit_code", exit_code) - span.set_attribute(ToolAttributes.TOOL_STATUS, "success" if exit_code == 0 else "error") - - if len(result) > 1 and result[1]: - stdout = result[1] - stdout = "" if stdout is None else str(stdout) if not isinstance(stdout, str) else stdout - span.set_attribute("ag2.tool.code.stdout", stdout) - - if len(result) > 2 and result[2]: - stderr = result[2] - stderr = "" if stderr is None else str(stderr) if not isinstance(stderr, str) else stderr - span.set_attribute("ag2.tool.code.stderr", stderr) - - span.set_status(Status(StatusCode.OK)) - return result - except Exception as e: - span.set_status(Status(StatusCode.ERROR, str(e))) - span.record_exception(e) - logger.error(f"Error in tool execution instrumentation: {e}") - return wrapped(*args, **kwargs) + elif tool_type == "code" and args: + code = args[0] + if isinstance(code, str): + span.set_attribute("ag2.tool.code.size", len(code)) + span.set_attribute("ag2.tool.code.language", kwargs.get("lang", "unknown")) + + result = wrapped(*args, **kwargs) + + self._process_tool_result(span, result, tool_type) + + return result return wrapper + def _group_chat_select_speaker_wrapper(self, tracer): + """Wrapper for capturing which agent is selected to speak in a group chat.""" + + def wrapper(wrapped, instance, args, kwargs): + previous_speaker_name = "unknown" + messages = getattr(instance, "messages", []) + if messages and len(messages) > 0: + previous_speaker_name = messages[-1].get("name", "unknown") + + selected_speaker = wrapped(*args, **kwargs) + + if not selected_speaker: + return selected_speaker + + current_speaker_name = getattr(selected_speaker, "name", "unnamed") + + with create_span( + tracer, "ag2.handoff", kind=SpanKind.INTERNAL, attribute_manager=self._attribute_manager + ) as span: + span.set_attribute(AgentAttributes.FROM_AGENT, previous_speaker_name) + span.set_attribute(AgentAttributes.TO_AGENT, current_speaker_name) + span.set_attribute(AgentAttributes.AGENT_NAME, current_speaker_name) + 
span.set_attribute(AgentAttributes.AGENT_ROLE, selected_speaker.__class__.__name__) + + system_message = getattr(selected_speaker, "system_message", "") + if system_message: + system_message = self._safe_str(system_message) + span.set_attribute(SpanAttributes.LLM_REQUEST_SYSTEM_INSTRUCTION, system_message) + + self._set_llm_config_attributes(span, getattr(selected_speaker, "llm_config", None)) + + if messages: + for msg in reversed(messages): + if msg.get("name") == current_speaker_name: + if "metadata" in msg and isinstance(msg["metadata"], dict): + meta = msg["metadata"] + if "model" in meta: + span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, meta["model"]) + break + + span.set_attribute("ag2.groupchat.role", "participant") + + return selected_speaker + + return wrapper + + # Helper methods + def _safe_str(self, value): + """Safely convert value to string.""" + if value is None: + return "" + return str(value) if not isinstance(value, str) else value + + def _extract_message_content(self, message): + """Extract content from various message formats.""" + if isinstance(message, dict): + content = message.get("content", "") + return self._safe_str(content) + elif isinstance(message, str): + return message + else: + return str(message) + + def _extract_agent_attributes(self, span, initiator, recipient): + """Extract and set agent attributes on span.""" + # Extract system message from both agents + initiator_system_msg = getattr(initiator, "system_message", "") + if initiator_system_msg: + initiator_system_msg = self._safe_str(initiator_system_msg) + span.set_attribute("ag2.initiator.system_message", initiator_system_msg) + + recipient_system_msg = getattr(recipient, "system_message", "") + if recipient_system_msg: + recipient_system_msg = self._safe_str(recipient_system_msg) + span.set_attribute(SpanAttributes.LLM_REQUEST_SYSTEM_INSTRUCTION, recipient_system_msg) + + # Extract LLM config from both agents + initiator_llm_config = getattr(initiator, "llm_config", {}) + if isinstance(initiator_llm_config, dict) and initiator_llm_config: + model = initiator_llm_config.get("model", "unknown") + span.set_attribute("ag2.initiator.model", model) + + recipient_llm_config = getattr(recipient, "llm_config", {}) + self._set_llm_config_attributes(span, recipient_llm_config) + + def _extract_chat_history(self, span, initiator, recipient): + """Extract chat history information.""" + try: + # Get initiator chat history + initiator_chat_history = getattr(initiator, "chat_history", []) + if initiator_chat_history: + span.set_attribute("ag2.initiator.message_count", len(initiator_chat_history)) + + # Get recipient chat history + recipient_chat_history = getattr(recipient, "chat_history", []) + if recipient_chat_history: + message_count = len(recipient_chat_history) + span.set_attribute("ag2.conversation.message_count", message_count) + + # Record sample of conversation messages + if message_count > 0: + self._set_message_attributes(span, recipient_chat_history[0], 0, "prompt") + self._set_message_attributes(span, recipient_chat_history[-1], 0, "completion") + + # Check for tool usage + last_msg = recipient_chat_history[-1] + span.set_attribute("ag2.chat.used_tools", "tool_calls" in last_msg) + + # Capture metadata + if "metadata" in last_msg and isinstance(last_msg["metadata"], dict): + meta = last_msg["metadata"] + if "model" in meta: + span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, meta["model"]) + except Exception as e: + logger.debug(f"Could not extract chat history: {e}") + + def 
_set_message_attributes(self, span, message, index, prefix): + """Set message attributes on span.""" + if isinstance(message, dict): + role = message.get("role", "unknown") + content = message.get("content", "") + name = message.get("name", "unknown") + + span.set_attribute(f"messaging.{prefix}.role.{index}", role) + content = self._safe_str(content) + span.set_attribute(f"messaging.{prefix}.content.{index}", content) + span.set_attribute(f"messaging.{prefix}.speaker.{index}", name) + + def _process_tool_result(self, span, result, tool_type): + """Process and set tool execution result attributes.""" + if tool_type == "function" and isinstance(result, tuple) and len(result) > 0: + success = result[0] if isinstance(result[0], bool) else False + span.set_attribute(ToolAttributes.TOOL_STATUS, "success" if success else "error") + + if len(result) > 1 and isinstance(result[1], dict): + try: + span.set_attribute(ToolAttributes.TOOL_RESULT, json.dumps(result[1])) + except: + pass + + if tool_type == "code" and isinstance(result, tuple) and len(result) >= 3: + exit_code = result[0] + span.set_attribute("exit_code", exit_code) + span.set_attribute(ToolAttributes.TOOL_STATUS, "success" if exit_code == 0 else "error") + + if len(result) > 1 and result[1]: + stdout = self._safe_str(result[1]) + span.set_attribute("ag2.tool.code.stdout", stdout) + + if len(result) > 2 and result[2]: + stderr = self._safe_str(result[2]) + span.set_attribute("ag2.tool.code.stderr", stderr) + def _capture_conversation_summary(self, span, agent, response): """Extract and record conversation summary data.""" if not hasattr(response, "chat_history"): @@ -479,29 +472,10 @@ def _capture_conversation_summary(self, span, agent, response): if message_count > 0: for i, msg in enumerate(chat_history[: min(2, message_count)]): - role = msg.get("role", "unknown") - content = msg.get("content", "") - name = msg.get("name", "") - - span.set_attribute(f"messaging.prompt.role.{i}", role) - content = "" if content is None else str(content) if not isinstance(content, str) else content - span.set_attribute(f"messaging.prompt.content.{i}", content) - - if name: - span.set_attribute(f"messaging.prompt.speaker.{i}", name) + self._set_message_attributes(span, msg, i, "prompt") if message_count > 2: - last_msg = chat_history[-1] - role = last_msg.get("role", "unknown") - content = last_msg.get("content", "") - name = last_msg.get("name", "") - - span.set_attribute("messaging.completion.role.0", role) - content = "" if content is None else str(content) if not isinstance(content, str) else content - span.set_attribute("messaging.completion.content.0", content) - - if name: - span.set_attribute("messaging.completion.speaker.0", name) + self._set_message_attributes(span, chat_history[-1], 0, "completion") except Exception as e: logger.error(f"Error capturing conversation summary: {e}") @@ -535,7 +509,7 @@ def _capture_group_chat_summary(self, span, manager, result): name = msg.get("name", "unknown") span.set_attribute(MessageAttributes.PROMPT_ROLE.format(i=i), role) - content = "" if content is None else str(content) if not isinstance(content, str) else content + content = self._safe_str(content) span.set_attribute(MessageAttributes.PROMPT_CONTENT.format(i=i), content) span.set_attribute(MessageAttributes.PROMPT_SPEAKER.format(i=i), name) @@ -546,7 +520,7 @@ def _capture_group_chat_summary(self, span, manager, result): name = last_msg.get("name", "unknown") span.set_attribute(MessageAttributes.COMPLETION_ROLE.format(i=0), role) - content = "" if 
content is None else str(content) if not isinstance(content, str) else content + content = self._safe_str(content) span.set_attribute(MessageAttributes.COMPLETION_CONTENT.format(i=0), content) span.set_attribute(MessageAttributes.COMPLETION_SPEAKER.format(i=0), name) @@ -556,59 +530,3 @@ def _capture_group_chat_summary(self, span, manager, result): span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, meta["model"]) except Exception as e: logger.error(f"Error capturing group chat summary: {e}") - - def _group_chat_select_speaker_wrapper(self, tracer): - """Wrapper for capturing which agent is selected to speak in a group chat.""" - - def wrapper(wrapped, instance, args, kwargs): - previous_speaker_name = "unknown" - messages = getattr(instance, "messages", []) - if messages and len(messages) > 0: - previous_speaker_name = messages[-1].get("name", "unknown") - - selected_speaker = wrapped(*args, **kwargs) - - if not selected_speaker: - return selected_speaker - - current_speaker_name = getattr(selected_speaker, "name", "unnamed") - - with tracer.start_as_current_span("ag2.handoff", kind=SpanKind.INTERNAL) as span: - try: - span.set_attribute(AgentAttributes.FROM_AGENT, previous_speaker_name) - span.set_attribute(AgentAttributes.TO_AGENT, current_speaker_name) - span.set_attribute(AgentAttributes.AGENT_NAME, current_speaker_name) - span.set_attribute(AgentAttributes.AGENT_ROLE, selected_speaker.__class__.__name__) - - system_message = getattr(selected_speaker, "system_message", "") - if system_message: - system_message = ( - "" - if system_message is None - else str(system_message) - if not isinstance(system_message, str) - else system_message - ) - span.set_attribute(SpanAttributes.LLM_REQUEST_SYSTEM_INSTRUCTION, system_message) - - self._set_llm_config_attributes(span, getattr(selected_speaker, "llm_config", None)) - - if messages: - for msg in reversed(messages): - if msg.get("name") == current_speaker_name: - if "metadata" in msg and isinstance(msg["metadata"], dict): - meta = msg["metadata"] - if "model" in meta: - span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, meta["model"]) - break - - span.set_attribute("ag2.groupchat.role", "participant") - span.set_status(Status(StatusCode.OK)) - except Exception as e: - span.set_status(Status(StatusCode.ERROR, str(e))) - span.record_exception(e) - logger.error(f"Error in group chat select speaker instrumentation: {e}") - - return selected_speaker - - return wrapper diff --git a/agentops/instrumentation/agno/instrumentor.py b/agentops/instrumentation/agno/instrumentor.py index 2755dab9d..2394d1189 100644 --- a/agentops/instrumentation/agno/instrumentor.py +++ b/agentops/instrumentation/agno/instrumentor.py @@ -5,7 +5,7 @@ We focus on instrumenting the following key endpoints: - Agent.run/arun - Main agent workflow execution (sync/async) -- Team._run/_arun - Team workflow execution (sync/async) +- Team._run/_arun - Team workflow execution (sync/async) - Team._run_stream/_arun_stream - Team streaming workflow execution (sync/async) - FunctionCall.execute/aexecute - Tool execution when agents call tools (sync/async) - Agent._run_tool/_arun_tool - Agent internal tool execution (sync/async) @@ -13,22 +13,21 @@ - Workflow.run_workflow/arun_workflow - Workflow execution (sync/async) - Workflow session management methods - Session lifecycle operations -This provides clean visibility into agent workflows and actual tool usage with proper +This provides clean visibility into agent workflows and actual tool usage with proper parent-child span 
relationships. """ from typing import List, Collection, Any, Optional -from opentelemetry.trace import get_tracer -from opentelemetry.instrumentation.instrumentor import BaseInstrumentor -from opentelemetry.metrics import get_meter from opentelemetry import trace, context as otel_context from opentelemetry.trace import Status, StatusCode -from wrapt import wrap_function_wrapper import threading from agentops.logging import logger -from agentops.semconv import Meters -from agentops.instrumentation.common.wrappers import WrapConfig, wrap, unwrap +from agentops.instrumentation.common import ( + BaseAgentOpsInstrumentor, + StandardMetrics, +) +from agentops.instrumentation.common.wrappers import WrapConfig # Import attribute handlers from agentops.instrumentation.agno.attributes.agent import get_agent_run_attributes @@ -40,10 +39,6 @@ get_workflow_session_attributes, ) -# Library info for tracer/meter -LIBRARY_NAME = "agentops.instrumentation.agno" -LIBRARY_VERSION = "0.1.0" - class StreamingContextManager: """Manages span contexts for streaming agent and workflow executions.""" @@ -88,10 +83,6 @@ def clear_all(self) -> None: self._agent_sessions.clear() -# Global context manager instance -_streaming_context_manager = StreamingContextManager() - - # Methods to wrap for instrumentation WRAPPED_METHODS: List[WrapConfig] = [ # Workflow session methods @@ -129,11 +120,12 @@ def clear_all(self) -> None: class StreamingResultWrapper: """Wrapper for streaming results that maintains agent span as active throughout iteration.""" - def __init__(self, original_result, span, agent_id, agent_context): + def __init__(self, original_result, span, agent_id, agent_context, streaming_context_manager): self.original_result = original_result self.span = span self.agent_id = agent_id self.agent_context = agent_context + self.streaming_context_manager = streaming_context_manager self._consumed = False def __iter__(self): @@ -150,14 +142,14 @@ def __iter__(self): if not self._consumed: self._consumed = True self.span.end() - _streaming_context_manager.remove_context(self.agent_id) + self.streaming_context_manager.remove_context(self.agent_id) def __getattr__(self, name): """Delegate attribute access to the original result.""" return getattr(self.original_result, name) -def create_streaming_workflow_wrapper(tracer): +def create_streaming_workflow_wrapper(tracer, streaming_context_manager): """Create a streaming-aware wrapper for workflow run methods.""" def wrapper(wrapped, instance, args, kwargs): @@ -180,7 +172,7 @@ def wrapper(wrapped, instance, args, kwargs): # Store context for streaming - capture current context with active span current_context = trace.set_span_in_context(span, otel_context.get_current()) - _streaming_context_manager.store_context(workflow_id, current_context, span) + streaming_context_manager.store_context(workflow_id, current_context, span) # Execute the original function within workflow context context_token = otel_context.attach(current_context) @@ -207,7 +199,7 @@ def wrapper(wrapped, instance, args, kwargs): span.set_status(Status(StatusCode.ERROR, str(e))) span.record_exception(e) span.end() - _streaming_context_manager.remove_context(workflow_id) + streaming_context_manager.remove_context(workflow_id) raise else: # For non-streaming, use normal context manager @@ -240,7 +232,7 @@ def wrapper(wrapped, instance, args, kwargs): return wrapper -def create_streaming_workflow_async_wrapper(tracer): +def create_streaming_workflow_async_wrapper(tracer, streaming_context_manager): """Create a 
streaming-aware async wrapper for workflow run methods.""" async def wrapper(wrapped, instance, args, kwargs): @@ -263,7 +255,7 @@ async def wrapper(wrapped, instance, args, kwargs): # Store context for streaming - capture current context with active span current_context = trace.set_span_in_context(span, otel_context.get_current()) - _streaming_context_manager.store_context(workflow_id, current_context, span) + streaming_context_manager.store_context(workflow_id, current_context, span) # Execute the original function within workflow context context_token = otel_context.attach(current_context) @@ -290,7 +282,7 @@ async def wrapper(wrapped, instance, args, kwargs): span.set_status(Status(StatusCode.ERROR, str(e))) span.record_exception(e) span.end() - _streaming_context_manager.remove_context(workflow_id) + streaming_context_manager.remove_context(workflow_id) raise else: # For non-streaming, use normal context manager @@ -323,7 +315,7 @@ async def wrapper(wrapped, instance, args, kwargs): return wrapper -def create_streaming_agent_wrapper(tracer): +def create_streaming_agent_wrapper(tracer, streaming_context_manager): """Create a streaming-aware wrapper for agent run methods.""" def wrapper(wrapped, instance, args, kwargs): @@ -349,11 +341,11 @@ def wrapper(wrapped, instance, args, kwargs): # Store context for streaming - capture current context with active span current_context = trace.set_span_in_context(span, otel_context.get_current()) - _streaming_context_manager.store_context(agent_id, current_context, span) + streaming_context_manager.store_context(agent_id, current_context, span) # Store session-to-agent mapping for LLM context lookup if session_id: - _streaming_context_manager.store_agent_session_mapping(session_id, agent_id) + streaming_context_manager.store_agent_session_mapping(session_id, agent_id) # Execute the original function within agent context context_token = otel_context.attach(current_context) @@ -374,18 +366,18 @@ def wrapper(wrapped, instance, args, kwargs): # Wrap the result to maintain context and end span when complete if hasattr(result, "__iter__"): - return StreamingResultWrapper(result, span, agent_id, current_context) + return StreamingResultWrapper(result, span, agent_id, current_context, streaming_context_manager) else: # Not actually streaming, clean up immediately span.end() - _streaming_context_manager.remove_context(agent_id) + streaming_context_manager.remove_context(agent_id) return result except Exception as e: span.set_status(Status(StatusCode.ERROR, str(e))) span.record_exception(e) span.end() - _streaming_context_manager.remove_context(agent_id) + streaming_context_manager.remove_context(agent_id) raise else: # For non-streaming, use normal context manager @@ -418,7 +410,7 @@ def wrapper(wrapped, instance, args, kwargs): return wrapper -def create_streaming_agent_async_wrapper(tracer): +def create_streaming_agent_async_wrapper(tracer, streaming_context_manager): """Create a streaming-aware async wrapper for agent run methods.""" async def wrapper(wrapped, instance, args, kwargs): @@ -444,11 +436,11 @@ async def wrapper(wrapped, instance, args, kwargs): # Store context for streaming - capture current context with active span current_context = trace.set_span_in_context(span, otel_context.get_current()) - _streaming_context_manager.store_context(agent_id, current_context, span) + streaming_context_manager.store_context(agent_id, current_context, span) # Store session-to-agent mapping for LLM context lookup if session_id: - 
_streaming_context_manager.store_agent_session_mapping(session_id, agent_id) + streaming_context_manager.store_agent_session_mapping(session_id, agent_id) # Execute the original function within agent context context_token = otel_context.attach(current_context) @@ -469,18 +461,18 @@ async def wrapper(wrapped, instance, args, kwargs): # Wrap the result to maintain context and end span when complete if hasattr(result, "__iter__"): - return StreamingResultWrapper(result, span, agent_id, current_context) + return StreamingResultWrapper(result, span, agent_id, current_context, streaming_context_manager) else: # Not actually streaming, clean up immediately span.end() - _streaming_context_manager.remove_context(agent_id) + streaming_context_manager.remove_context(agent_id) return result except Exception as e: span.set_status(Status(StatusCode.ERROR, str(e))) span.record_exception(e) span.end() - _streaming_context_manager.remove_context(agent_id) + streaming_context_manager.remove_context(agent_id) raise else: # For non-streaming, use normal context manager @@ -513,7 +505,7 @@ async def wrapper(wrapped, instance, args, kwargs): return wrapper -def create_streaming_tool_wrapper(tracer): +def create_streaming_tool_wrapper(tracer, streaming_context_manager): """Create a streaming-aware wrapper for tool execution methods.""" def wrapper(wrapped, instance, args, kwargs): @@ -527,7 +519,7 @@ def wrapper(wrapped, instance, args, kwargs): agent = instance._agent agent_id = getattr(agent, "agent_id", None) or getattr(agent, "id", None) or id(agent) agent_id = str(agent_id) - context_info = _streaming_context_manager.get_context(agent_id) + context_info = streaming_context_manager.get_context(agent_id) if context_info: parent_context, parent_span = context_info except Exception: @@ -542,7 +534,7 @@ def wrapper(wrapped, instance, args, kwargs): getattr(workflow, "workflow_id", None) or getattr(workflow, "id", None) or id(workflow) ) workflow_id = str(workflow_id) - context_info = _streaming_context_manager.get_context(workflow_id) + context_info = streaming_context_manager.get_context(workflow_id) if context_info: parent_context, parent_span = context_info except Exception: @@ -610,7 +602,7 @@ def wrapper(wrapped, instance, args, kwargs): return wrapper -def create_metrics_wrapper(tracer): +def create_metrics_wrapper(tracer, streaming_context_manager): """Create a wrapper for metrics methods with dynamic span naming.""" def wrapper(wrapped, instance, args, kwargs): @@ -647,7 +639,7 @@ def wrapper(wrapped, instance, args, kwargs): return wrapper -def create_team_internal_wrapper(tracer): +def create_team_internal_wrapper(tracer, streaming_context_manager): """Create a wrapper for Team internal methods (_run/_arun) that manages team span lifecycle.""" def wrapper(wrapped, instance, args, kwargs): @@ -656,7 +648,7 @@ def wrapper(wrapped, instance, args, kwargs): team_id = str(team_id) # Check if we already have a team context (from print_response) - existing_context = _streaming_context_manager.get_context(team_id) + existing_context = streaming_context_manager.get_context(team_id) if existing_context: # We're being called from print_response, use existing context @@ -686,7 +678,7 @@ def wrapper(wrapped, instance, args, kwargs): # Close the parent team span when workflow completes if parent_span: parent_span.end() - _streaming_context_manager.remove_context(team_id) + streaming_context_manager.remove_context(team_id) finally: otel_context.detach(context_token) else: @@ -712,7 +704,7 @@ def wrapper(wrapped, 
instance, args, kwargs): return wrapper -def create_team_internal_async_wrapper(tracer): +def create_team_internal_async_wrapper(tracer, streaming_context_manager): """Create an async wrapper for Team internal methods (_arun) that manages team span lifecycle.""" async def wrapper(wrapped, instance, args, kwargs): @@ -721,7 +713,7 @@ async def wrapper(wrapped, instance, args, kwargs): team_id = str(team_id) # Check if we already have a team context (from print_response) - existing_context = _streaming_context_manager.get_context(team_id) + existing_context = streaming_context_manager.get_context(team_id) if existing_context: # We're being called from print_response, use existing context @@ -751,7 +743,7 @@ async def wrapper(wrapped, instance, args, kwargs): # Close the parent team span when workflow completes if parent_span: parent_span.end() - _streaming_context_manager.remove_context(team_id) + streaming_context_manager.remove_context(team_id) finally: otel_context.detach(context_token) else: @@ -777,7 +769,7 @@ async def wrapper(wrapped, instance, args, kwargs): return wrapper -def create_team_wrapper(tracer): +def create_team_wrapper(tracer, streaming_context_manager): """Create a wrapper for Team methods that establishes the team context.""" def wrapper(wrapped, instance, args, kwargs): @@ -802,7 +794,7 @@ def wrapper(wrapped, instance, args, kwargs): # Store context for child spans current_context = trace.set_span_in_context(span, otel_context.get_current()) - _streaming_context_manager.store_context(team_id, current_context, span) + streaming_context_manager.store_context(team_id, current_context, span) # The span will be closed by the internal _run method # Just execute print_response normally @@ -813,7 +805,7 @@ def wrapper(wrapped, instance, args, kwargs): span.set_status(Status(StatusCode.ERROR, str(e))) span.record_exception(e) span.end() - _streaming_context_manager.remove_context(team_id) + streaming_context_manager.remove_context(team_id) raise else: # For run/arun methods, use standard span management @@ -827,7 +819,7 @@ def wrapper(wrapped, instance, args, kwargs): # Store context for child spans current_context = trace.set_span_in_context(span, otel_context.get_current()) - _streaming_context_manager.store_context(team_id, current_context, span) + streaming_context_manager.store_context(team_id, current_context, span) # Execute the original function within team context context_token = otel_context.attach(current_context) @@ -836,11 +828,11 @@ def wrapper(wrapped, instance, args, kwargs): # For streaming results, wrap them to keep span alive if is_streaming and hasattr(result, "__iter__"): - return StreamingResultWrapper(result, span, team_id, current_context) + return StreamingResultWrapper(result, span, team_id, current_context, streaming_context_manager) else: # Non-streaming, close span span.end() - _streaming_context_manager.remove_context(team_id) + streaming_context_manager.remove_context(team_id) return result finally: @@ -850,13 +842,13 @@ def wrapper(wrapped, instance, args, kwargs): span.set_status(Status(StatusCode.ERROR, str(e))) span.record_exception(e) span.end() - _streaming_context_manager.remove_context(team_id) + streaming_context_manager.remove_context(team_id) raise return wrapper -def create_team_async_wrapper(tracer): +def create_team_async_wrapper(tracer, streaming_context_manager): """Create an async wrapper for Team methods that establishes the team context.""" async def wrapper(wrapped, instance, args, kwargs): @@ -878,7 +870,7 @@ async def 
wrapper(wrapped, instance, args, kwargs): # Store context for child spans - capture current context with active span current_context = trace.set_span_in_context(span, otel_context.get_current()) - _streaming_context_manager.store_context(team_id, current_context, span) + streaming_context_manager.store_context(team_id, current_context, span) # Execute the original function within team context context_token = otel_context.attach(current_context) @@ -888,7 +880,7 @@ async def wrapper(wrapped, instance, args, kwargs): # For non-streaming, close the span if not is_streaming: span.end() - _streaming_context_manager.remove_context(team_id) + streaming_context_manager.remove_context(team_id) return result finally: @@ -898,7 +890,7 @@ async def wrapper(wrapped, instance, args, kwargs): span.set_status(Status(StatusCode.ERROR, str(e))) span.record_exception(e) span.end() - _streaming_context_manager.remove_context(team_id) + streaming_context_manager.remove_context(team_id) raise return wrapper @@ -918,161 +910,176 @@ def get_agent_context_for_llm(): return None, None -class AgnoInstrumentor(BaseInstrumentor): +class AgnoInstrumentor(BaseAgentOpsInstrumentor): """Agno instrumentation class.""" + def __init__(self): + """Initialize the Agno instrumentor.""" + super().__init__( + name="agno", + version="0.1.0", + library_name="agentops.instrumentation.agno", + ) + self._streaming_context_manager = StreamingContextManager() + def instrumentation_dependencies(self) -> Collection[str]: """Returns list of packages required for instrumentation.""" return ["agno >= 0.1.0"] - def _instrument(self, **kwargs): - """Install instrumentation for Agno.""" - tracer_provider = kwargs.get("tracer_provider") - tracer = get_tracer(LIBRARY_NAME, LIBRARY_VERSION, tracer_provider) - - meter_provider = kwargs.get("meter_provider") - meter = get_meter(LIBRARY_NAME, LIBRARY_VERSION, meter_provider) - - # Create metrics - meter.create_histogram( - name=Meters.LLM_TOKEN_USAGE, - unit="token", - description="Measures number of input and output tokens used with Agno agents", - ) - - meter.create_histogram( - name=Meters.LLM_OPERATION_DURATION, - unit="s", - description="Agno agent operation duration", + def _get_wrapped_methods(self) -> List[WrapConfig]: + """Return list of methods to be wrapped.""" + # Combine standard wrapped methods with custom streaming wraps + wrapped_methods = WRAPPED_METHODS.copy() + + # Add streaming method configurations + wrapped_methods.extend( + [ + # Streaming agent methods + WrapConfig( + trace_name="agno.agent.run.agent", + package="agno.agent", + class_name="Agent", + method_name="run", + handler=self._create_streaming_agent_wrapper, + ), + WrapConfig( + trace_name="agno.agent.run.agent", + package="agno.agent", + class_name="Agent", + method_name="arun", + handler=self._create_streaming_agent_async_wrapper, + ), + # Streaming workflow methods + WrapConfig( + trace_name="agno.workflow.run.workflow", + package="agno.workflow.workflow", + class_name="Workflow", + method_name="run_workflow", + handler=self._create_streaming_workflow_wrapper, + ), + WrapConfig( + trace_name="agno.workflow.run.workflow", + package="agno.workflow.workflow", + class_name="Workflow", + method_name="arun_workflow", + handler=self._create_streaming_workflow_async_wrapper, + ), + # Streaming tool execution + WrapConfig( + trace_name="agno.tool.execute.tool_usage", + package="agno.tools.function", + class_name="FunctionCall", + method_name="execute", + handler=self._create_streaming_tool_wrapper, + ), + # Metrics wrapper + 
WrapConfig( + trace_name="agno.agent.metrics", + package="agno.agent", + class_name="Agent", + method_name="_set_session_metrics", + handler=self._create_metrics_wrapper, + ), + # Team methods + WrapConfig( + trace_name="agno.team.run.agent", + package="agno.team.team", + class_name="Team", + method_name="run", + handler=self._create_team_wrapper, + ), + WrapConfig( + trace_name="agno.team.run.agent", + package="agno.team.team", + class_name="Team", + method_name="arun", + handler=self._create_team_async_wrapper, + ), + WrapConfig( + trace_name="agno.team.run.agent", + package="agno.team.team", + class_name="Team", + method_name="print_response", + handler=self._create_team_wrapper, + ), + # Team internal methods with special handling + WrapConfig( + trace_name="agno.team.run.workflow", + package="agno.team.team", + class_name="Team", + method_name="_run", + handler=self._create_team_internal_wrapper, + ), + WrapConfig( + trace_name="agno.team.run.workflow", + package="agno.team.team", + class_name="Team", + method_name="_arun", + handler=self._create_team_internal_async_wrapper, + ), + ] ) - meter.create_counter( - name=Meters.LLM_COMPLETIONS_EXCEPTIONS, - unit="time", - description="Number of exceptions occurred during Agno agent operations", - ) + return wrapped_methods - # Standard method wrapping using WrapConfig - for wrap_config in WRAPPED_METHODS: - try: - wrap(wrap_config, tracer) - except (AttributeError, ModuleNotFoundError): - logger.debug(f"Could not wrap {wrap_config}") + def _instrument(self, **kwargs): + """Install instrumentation for Agno.""" + # Call parent implementation + super()._instrument(**kwargs) - # Special handling for streaming methods - # These require custom wrappers due to their streaming nature - try: - # Streaming agent methods - wrap_function_wrapper( - "agno.agent", - "Agent.run", - create_streaming_agent_wrapper(tracer), - ) - wrap_function_wrapper( - "agno.agent", - "Agent.arun", - create_streaming_agent_async_wrapper(tracer), - ) - - # Streaming workflow methods - wrap_function_wrapper( - "agno.workflow.workflow", - "Workflow.run_workflow", - create_streaming_workflow_wrapper(tracer), - ) - wrap_function_wrapper( - "agno.workflow.workflow", - "Workflow.arun_workflow", - create_streaming_workflow_async_wrapper(tracer), - ) - - # Streaming tool execution - wrap_function_wrapper( - "agno.tools.function", - "FunctionCall.execute", - create_streaming_tool_wrapper(tracer), - ) - - # Metrics wrapper - wrap_function_wrapper( - "agno.agent", - "Agent._set_session_metrics", - create_metrics_wrapper(tracer), - ) - - # Team methods - wrap_function_wrapper( - "agno.team.team", - "Team.run", - create_team_wrapper(tracer), - ) - wrap_function_wrapper( - "agno.team.team", - "Team.arun", - create_team_async_wrapper(tracer), - ) - wrap_function_wrapper( - "agno.team.team", - "Team.print_response", - create_team_wrapper(tracer), - ) - - # Team internal methods with special handling - wrap_function_wrapper( - "agno.team.team", - "Team._run", - create_team_internal_wrapper(tracer), - ) - wrap_function_wrapper( - "agno.team.team", - "Team._arun", - create_team_internal_async_wrapper(tracer), - ) - - logger.debug("Successfully wrapped Agno streaming methods") - except (AttributeError, ModuleNotFoundError) as e: - logger.debug(f"Failed to wrap Agno streaming methods: {e}") + # Create standard metrics for LLM operations + self._metrics = StandardMetrics(self._meter) + self._metrics.create_llm_metrics(system_name="agno", operation_description="Agno agent operation") 
logger.info("Agno instrumentation installed successfully") def _uninstrument(self, **kwargs): """Remove instrumentation for Agno.""" # Clear streaming contexts - _streaming_context_manager.clear_all() + self._streaming_context_manager.clear_all() - # Unwrap standard methods - for wrap_config in WRAPPED_METHODS: - try: - unwrap(wrap_config) - except Exception: - logger.debug(f"Failed to unwrap {wrap_config}") + # Call parent implementation + super()._uninstrument(**kwargs) - # Unwrap streaming methods - try: - from opentelemetry.instrumentation.utils import unwrap as otel_unwrap + logger.info("Agno instrumentation removed successfully") - # Agent methods - otel_unwrap("agno.agent", "Agent.run") - otel_unwrap("agno.agent", "Agent.arun") + # Method wrappers converted to instance methods + def _create_streaming_agent_wrapper(self, args=None, kwargs=None, return_value=None): + """Wrapper function for streaming agent methods.""" + return create_streaming_agent_wrapper(self._tracer, self._streaming_context_manager) - # Workflow methods - otel_unwrap("agno.workflow.workflow", "Workflow.run_workflow") - otel_unwrap("agno.workflow.workflow", "Workflow.arun_workflow") + def _create_streaming_agent_async_wrapper(self, args=None, kwargs=None, return_value=None): + """Wrapper function for async streaming agent methods.""" + return create_streaming_agent_async_wrapper(self._tracer, self._streaming_context_manager) - # Tool methods - otel_unwrap("agno.tools.function", "FunctionCall.execute") + def _create_streaming_workflow_wrapper(self, args=None, kwargs=None, return_value=None): + """Wrapper function for streaming workflow methods.""" + return create_streaming_workflow_wrapper(self._tracer, self._streaming_context_manager) - # Metrics methods - otel_unwrap("agno.agent", "Agent._set_session_metrics") + def _create_streaming_workflow_async_wrapper(self, args=None, kwargs=None, return_value=None): + """Wrapper function for async streaming workflow methods.""" + return create_streaming_workflow_async_wrapper(self._tracer, self._streaming_context_manager) - # Team methods - otel_unwrap("agno.team.team", "Team.run") - otel_unwrap("agno.team.team", "Team.arun") - otel_unwrap("agno.team.team", "Team.print_response") - otel_unwrap("agno.team.team", "Team._run") - otel_unwrap("agno.team.team", "Team._arun") + def _create_streaming_tool_wrapper(self, args=None, kwargs=None, return_value=None): + """Wrapper function for streaming tool methods.""" + return create_streaming_tool_wrapper(self._tracer, self._streaming_context_manager) - except (AttributeError, ModuleNotFoundError): - logger.debug("Failed to unwrap Agno streaming methods") + def _create_metrics_wrapper(self, args=None, kwargs=None, return_value=None): + """Wrapper function for metrics methods.""" + return create_metrics_wrapper(self._tracer, self._streaming_context_manager) - logger.info("Agno instrumentation removed successfully") + def _create_team_wrapper(self, args=None, kwargs=None, return_value=None): + """Wrapper function for team methods.""" + return create_team_wrapper(self._tracer, self._streaming_context_manager) + + def _create_team_async_wrapper(self, args=None, kwargs=None, return_value=None): + """Wrapper function for async team methods.""" + return create_team_async_wrapper(self._tracer, self._streaming_context_manager) + + def _create_team_internal_wrapper(self, args=None, kwargs=None, return_value=None): + """Wrapper function for team internal methods.""" + return create_team_internal_wrapper(self._tracer, 
self._streaming_context_manager) + + def _create_team_internal_async_wrapper(self, args=None, kwargs=None, return_value=None): + """Wrapper function for async team internal methods.""" + return create_team_internal_async_wrapper(self._tracer, self._streaming_context_manager) diff --git a/agentops/instrumentation/anthropic/instrumentor.py b/agentops/instrumentation/anthropic/instrumentor.py index fdaae4f33..dca75564e 100644 --- a/agentops/instrumentation/anthropic/instrumentor.py +++ b/agentops/instrumentation/anthropic/instrumentor.py @@ -28,62 +28,22 @@ - Maintains span context across multiple events """ -from typing import List, Collection -from opentelemetry.trace import get_tracer -from opentelemetry.instrumentation.instrumentor import BaseInstrumentor -from opentelemetry.metrics import get_meter +from typing import Dict, Any from wrapt import wrap_function_wrapper from agentops.logging import logger -from agentops.instrumentation.common.wrappers import WrapConfig, wrap, unwrap +from agentops.instrumentation.common import BaseAgentOpsInstrumentor, InstrumentorConfig, WrapConfig, StandardMetrics from agentops.instrumentation.anthropic import LIBRARY_NAME, LIBRARY_VERSION from agentops.instrumentation.anthropic.attributes.message import get_message_attributes, get_completion_attributes from agentops.instrumentation.anthropic.stream_wrapper import ( messages_stream_wrapper, messages_stream_async_wrapper, ) -from agentops.semconv import Meters - -# Methods to wrap for instrumentation -WRAPPED_METHODS: List[WrapConfig] = [ - # Main messages.create (modern API) - WrapConfig( - trace_name="anthropic.messages.create", - package="anthropic.resources.messages", - class_name="Messages", - method_name="create", - handler=get_message_attributes, - ), - # Async variant - WrapConfig( - trace_name="anthropic.messages.create", - package="anthropic.resources.messages", - class_name="AsyncMessages", - method_name="create", - handler=get_message_attributes, - is_async=True, - ), - # Legacy completions API - WrapConfig( - trace_name="anthropic.completions.create", - package="anthropic.resources.completions", - class_name="Completions", - method_name="create", - handler=get_completion_attributes, - ), - # Async variant of legacy API - WrapConfig( - trace_name="anthropic.completions.create", - package="anthropic.resources.completions", - class_name="AsyncCompletions", - method_name="create", - handler=get_completion_attributes, - is_async=True, - ), -] - - -class AnthropicInstrumentor(BaseInstrumentor): +from opentelemetry.metrics import Meter +from opentelemetry.instrumentation.utils import unwrap as otel_unwrap + + +class AnthropicInstrumentor(BaseAgentOpsInstrumentor): """An instrumentor for Anthropic's Claude API. This class provides instrumentation for Anthropic's Claude API by wrapping key methods @@ -98,56 +58,63 @@ class AnthropicInstrumentor(BaseInstrumentor): It captures metrics including token usage, operation duration, and exceptions. """ - def instrumentation_dependencies(self) -> Collection[str]: - """Return packages required for instrumentation. - - Returns: - A collection of package specifications required for this instrumentation. - """ - return ["anthropic >= 0.7.0"] - - def _instrument(self, **kwargs): - """Instrument the Anthropic API. - - This method wraps the key methods in the Anthropic client to capture - telemetry data for API calls. It sets up tracers, meters, and wraps the appropriate - methods for instrumentation. - - Args: - **kwargs: Configuration options for instrumentation. 
- """ - tracer_provider = kwargs.get("tracer_provider") - tracer = get_tracer(LIBRARY_NAME, LIBRARY_VERSION, tracer_provider) - - meter_provider = kwargs.get("meter_provider") - meter = get_meter(LIBRARY_NAME, LIBRARY_VERSION, meter_provider) - - meter.create_histogram( - name=Meters.LLM_TOKEN_USAGE, - unit="token", - description="Measures number of input and output tokens used with Anthropic models", + def __init__(self): + # Define wrapped methods + wrapped_methods = [ + # Main messages.create (modern API) + WrapConfig( + trace_name="anthropic.messages.create", + package="anthropic.resources.messages", + class_name="Messages", + method_name="create", + handler=get_message_attributes, + ), + # Async variant + WrapConfig( + trace_name="anthropic.messages.create", + package="anthropic.resources.messages", + class_name="AsyncMessages", + method_name="create", + handler=get_message_attributes, + is_async=True, + ), + # Legacy completions API + WrapConfig( + trace_name="anthropic.completions.create", + package="anthropic.resources.completions", + class_name="Completions", + method_name="create", + handler=get_completion_attributes, + ), + # Async variant of legacy API + WrapConfig( + trace_name="anthropic.completions.create", + package="anthropic.resources.completions", + class_name="AsyncCompletions", + method_name="create", + handler=get_completion_attributes, + is_async=True, + ), + ] + + # Create instrumentor config + config = InstrumentorConfig( + library_name=LIBRARY_NAME, + library_version=LIBRARY_VERSION, + wrapped_methods=wrapped_methods, + metrics_enabled=True, + dependencies=["anthropic >= 0.7.0"], ) - meter.create_histogram( - name=Meters.LLM_OPERATION_DURATION, - unit="s", - description="Anthropic API operation duration", - ) - - meter.create_counter( - name=Meters.LLM_COMPLETIONS_EXCEPTIONS, - unit="time", - description="Number of exceptions occurred during Anthropic completions", - ) + super().__init__(config) - # Standard method wrapping approach - # Uses the common wrappers module to wrap methods with tracers - for wrap_config in WRAPPED_METHODS: - try: - wrap(wrap_config, tracer) - except (AttributeError, ModuleNotFoundError): - logger.debug(f"Could not wrap {wrap_config.package}.{wrap_config.class_name}.{wrap_config.method_name}") + def _create_metrics(self, meter: Meter) -> Dict[str, Any]: + """Create metrics for Anthropic instrumentation.""" + # Use standard metrics from common module + return StandardMetrics.create_standard_metrics(meter) + def _custom_wrap(self, **kwargs): + """Perform custom wrapping for streaming methods.""" # Special handling for streaming responses # Uses direct wrapt.wrap_function_wrapper for stream methods # This approach captures events as they arrive rather than waiting for completion @@ -155,39 +122,21 @@ def _instrument(self, **kwargs): wrap_function_wrapper( "anthropic.resources.messages.messages", "Messages.stream", - messages_stream_wrapper(tracer), + messages_stream_wrapper(self._tracer), ) wrap_function_wrapper( "anthropic.resources.messages.messages", "AsyncMessages.stream", - messages_stream_async_wrapper(tracer), + messages_stream_async_wrapper(self._tracer), ) except (AttributeError, ModuleNotFoundError): logger.debug("Failed to wrap Anthropic streaming methods") - def _uninstrument(self, **kwargs): - """Remove instrumentation from Anthropic API. - - This method unwraps all methods that were wrapped during instrumentation, - restoring the original behavior of the Anthropic API. 
- - Args: - **kwargs: Configuration options for uninstrumentation. - """ - # Unwrap standard methods - for wrap_config in WRAPPED_METHODS: - try: - unwrap(wrap_config) - except Exception: - logger.debug( - f"Failed to unwrap {wrap_config.package}.{wrap_config.class_name}.{wrap_config.method_name}" - ) - + def _custom_unwrap(self, **kwargs): + """Perform custom unwrapping for streaming methods.""" # Unwrap streaming methods try: - from opentelemetry.instrumentation.utils import unwrap as otel_unwrap - otel_unwrap("anthropic.resources.messages.messages", "Messages.stream") otel_unwrap("anthropic.resources.messages.messages", "AsyncMessages.stream") except (AttributeError, ModuleNotFoundError): diff --git a/agentops/instrumentation/common/__init__.py b/agentops/instrumentation/common/__init__.py index 45178a2da..9b734c320 100644 --- a/agentops/instrumentation/common/__init__.py +++ b/agentops/instrumentation/common/__init__.py @@ -1,4 +1,75 @@ +"""Common utilities for AgentOps instrumentation. + +This module provides shared functionality for instrumenting various libraries, +including base classes, attribute management, metrics, and streaming utilities. +""" + from agentops.instrumentation.common.attributes import AttributeMap, _extract_attributes_from_mapping -from agentops.instrumentation.common.wrappers import _with_tracer_wrapper +from agentops.instrumentation.common.wrappers import _with_tracer_wrapper, WrapConfig, wrap, unwrap +from agentops.instrumentation.common.instrumentor import ( + InstrumentorConfig, + BaseAgentOpsInstrumentor, + create_wrapper_factory, +) +from agentops.instrumentation.common.metrics import StandardMetrics, MetricsRecorder +from agentops.instrumentation.common.span_management import ( + SpanAttributeManager, + create_span, + timed_span, + StreamingSpanManager, + extract_parent_context, + safe_set_attribute, + get_span_context_info, +) +from agentops.instrumentation.common.token_counting import ( + TokenUsage, + TokenUsageExtractor, + calculate_token_efficiency, + calculate_cache_efficiency, + set_token_usage_attributes, +) +from agentops.instrumentation.common.streaming import ( + BaseStreamWrapper, + SyncStreamWrapper, + AsyncStreamWrapper, + create_stream_wrapper_factory, + StreamingResponseHandler, +) -__all__ = ["AttributeMap", "_extract_attributes_from_mapping", "_with_tracer_wrapper"] +__all__ = [ + # Attributes + "AttributeMap", + "_extract_attributes_from_mapping", + # Wrappers + "_with_tracer_wrapper", + "WrapConfig", + "wrap", + "unwrap", + # Instrumentor + "InstrumentorConfig", + "BaseAgentOpsInstrumentor", + "create_wrapper_factory", + # Metrics + "StandardMetrics", + "MetricsRecorder", + # Span Management + "SpanAttributeManager", + "create_span", + "timed_span", + "StreamingSpanManager", + "extract_parent_context", + "safe_set_attribute", + "get_span_context_info", + # Token Counting + "TokenUsage", + "TokenUsageExtractor", + "calculate_token_efficiency", + "calculate_cache_efficiency", + "set_token_usage_attributes", + # Streaming + "BaseStreamWrapper", + "SyncStreamWrapper", + "AsyncStreamWrapper", + "create_stream_wrapper_factory", + "StreamingResponseHandler", +] diff --git a/agentops/instrumentation/common/instrumentor.py b/agentops/instrumentation/common/instrumentor.py new file mode 100644 index 000000000..5011a2a5a --- /dev/null +++ b/agentops/instrumentation/common/instrumentor.py @@ -0,0 +1,148 @@ +"""Base instrumentor utilities for AgentOps instrumentation. 
+ +This module provides base classes and utilities for creating instrumentors, +reducing boilerplate code across different provider instrumentations. +""" + +from abc import ABC, abstractmethod +from typing import Collection, Dict, List, Optional, Any, Callable +from dataclasses import dataclass, field + +from opentelemetry.instrumentation.instrumentor import BaseInstrumentor +from opentelemetry.trace import Tracer, get_tracer +from opentelemetry.metrics import Meter, get_meter + +from agentops.instrumentation.common.wrappers import WrapConfig, wrap, unwrap +from agentops.logging import logger + + +@dataclass +class InstrumentorConfig: + """Configuration for an instrumentor.""" + + library_name: str + library_version: str + wrapped_methods: List[WrapConfig] = field(default_factory=list) + metrics_enabled: bool = True + dependencies: Collection[str] = field(default_factory=list) + + +class BaseAgentOpsInstrumentor(BaseInstrumentor, ABC): + """Base class for AgentOps instrumentors with common functionality.""" + + def __init__(self, config: InstrumentorConfig): + super().__init__() + self.config = config + self._tracer: Optional[Tracer] = None + self._meter: Optional[Meter] = None + self._metrics: Dict[str, Any] = {} + + def instrumentation_dependencies(self) -> Collection[str]: + """Return required dependencies.""" + return self.config.dependencies + + def _instrument(self, **kwargs): + """Instrument the target library.""" + # Initialize tracer + tracer_provider = kwargs.get("tracer_provider") + self._tracer = get_tracer(self.config.library_name, self.config.library_version, tracer_provider) + + # Initialize meter if metrics enabled + if self.config.metrics_enabled: + meter_provider = kwargs.get("meter_provider") + self._meter = get_meter(self.config.library_name, self.config.library_version, meter_provider) + self._metrics = self._create_metrics(self._meter) + + # Perform custom initialization + self._initialize(**kwargs) + + # Wrap all configured methods + self._wrap_methods() + + # Perform custom wrapping + self._custom_wrap(**kwargs) + + def _uninstrument(self, **kwargs): + """Remove instrumentation.""" + # Unwrap all configured methods + for wrap_config in self.config.wrapped_methods: + try: + unwrap(wrap_config) + except Exception as e: + logger.debug( + f"Failed to unwrap {wrap_config.package}." + f"{wrap_config.class_name}.{wrap_config.method_name}: {e}" + ) + + # Perform custom unwrapping + self._custom_unwrap(**kwargs) + + # Clear references + self._tracer = None + self._meter = None + self._metrics.clear() + + def _wrap_methods(self): + """Wrap all configured methods.""" + for wrap_config in self.config.wrapped_methods: + try: + wrap(wrap_config, self._tracer) + except (AttributeError, ModuleNotFoundError) as e: + logger.debug( + f"Could not wrap {wrap_config.package}." f"{wrap_config.class_name}.{wrap_config.method_name}: {e}" + ) + + @abstractmethod + def _create_metrics(self, meter: Meter) -> Dict[str, Any]: + """Create metrics for the instrumentor. + + Returns a dictionary of metric name to metric instance. + """ + pass + + def _initialize(self, **kwargs): + """Perform custom initialization. + + Override in subclasses for custom initialization logic. + """ + pass + + def _custom_wrap(self, **kwargs): + """Perform custom wrapping beyond configured methods. + + Override in subclasses for special wrapping needs. + """ + pass + + def _custom_unwrap(self, **kwargs): + """Perform custom unwrapping beyond configured methods. + + Override in subclasses for special unwrapping needs. 
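+
+        Example (illustrative sketch; ``my_library.client`` and
+        ``MyClient.stream`` are placeholder names, not part of this module)::
+
+            def _custom_unwrap(self, **kwargs):
+                from opentelemetry.instrumentation.utils import unwrap as otel_unwrap
+                otel_unwrap("my_library.client", "MyClient.stream")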
+ """ + pass + + +def create_wrapper_factory(wrapper_func: Callable, *wrapper_args, **wrapper_kwargs) -> Callable: + """Create a factory function for wrapt-style wrappers. + + This is useful for creating wrappers that need additional arguments + beyond the standard (wrapped, instance, args, kwargs). + + Args: + wrapper_func: The wrapper function to call + *wrapper_args: Arguments to pass to the wrapper + **wrapper_kwargs: Keyword arguments to pass to the wrapper + + Returns: + A factory function that returns the configured wrapper + """ + + def factory(tracer: Tracer): + def wrapper(wrapped, instance, args, kwargs): + return wrapper_func( + tracer, *wrapper_args, wrapped=wrapped, instance=instance, args=args, kwargs=kwargs, **wrapper_kwargs + ) + + return wrapper + + return factory diff --git a/agentops/instrumentation/common/metrics.py b/agentops/instrumentation/common/metrics.py new file mode 100644 index 000000000..a326ec311 --- /dev/null +++ b/agentops/instrumentation/common/metrics.py @@ -0,0 +1,100 @@ +"""Common metrics utilities for AgentOps instrumentation. + +This module provides utilities for creating and managing standard metrics +across different instrumentations. +""" + +from typing import Dict, Any, Optional +from opentelemetry.metrics import Meter, Histogram, Counter +from agentops.semconv import Meters + + +class StandardMetrics: + """Factory for creating standard metrics used across instrumentations.""" + + @staticmethod + def create_token_histogram(meter: Meter) -> Histogram: + """Create a histogram for token usage.""" + return meter.create_histogram( + name=Meters.LLM_TOKEN_USAGE, unit="token", description="Measures number of input and output tokens used" + ) + + @staticmethod + def create_duration_histogram(meter: Meter) -> Histogram: + """Create a histogram for operation duration.""" + return meter.create_histogram( + name=Meters.LLM_OPERATION_DURATION, unit="s", description="GenAI operation duration" + ) + + @staticmethod + def create_exception_counter(meter: Meter, name: str = Meters.LLM_COMPLETIONS_EXCEPTIONS) -> Counter: + """Create a counter for exceptions.""" + return meter.create_counter( + name=name, unit="time", description="Number of exceptions occurred during operations" + ) + + @staticmethod + def create_choice_counter(meter: Meter) -> Counter: + """Create a counter for generation choices.""" + return meter.create_counter( + name=Meters.LLM_GENERATION_CHOICES, + unit="choice", + description="Number of choices returned by completions call", + ) + + @staticmethod + def create_standard_metrics(meter: Meter) -> Dict[str, Any]: + """Create a standard set of metrics for LLM operations. 
+ + Returns: + Dictionary with metric names as keys and metric instances as values + """ + return { + "token_histogram": StandardMetrics.create_token_histogram(meter), + "duration_histogram": StandardMetrics.create_duration_histogram(meter), + "exception_counter": StandardMetrics.create_exception_counter(meter), + } + + +class MetricsRecorder: + """Utility class for recording metrics in a consistent way.""" + + def __init__(self, metrics: Dict[str, Any]): + self.metrics = metrics + + def record_token_usage( + self, + prompt_tokens: Optional[int] = None, + completion_tokens: Optional[int] = None, + attributes: Optional[Dict[str, Any]] = None, + ): + """Record token usage metrics.""" + token_histogram = self.metrics.get("token_histogram") + if not token_histogram: + return + + attrs = attributes or {} + + if prompt_tokens is not None: + token_histogram.record(prompt_tokens, attributes={**attrs, "token.type": "input"}) + + if completion_tokens is not None: + token_histogram.record(completion_tokens, attributes={**attrs, "token.type": "output"}) + + def record_duration(self, duration: float, attributes: Optional[Dict[str, Any]] = None): + """Record operation duration.""" + duration_histogram = self.metrics.get("duration_histogram") + if duration_histogram: + duration_histogram.record(duration, attributes=attributes or {}) + + def record_exception(self, attributes: Optional[Dict[str, Any]] = None): + """Record an exception occurrence.""" + exception_counter = self.metrics.get("exception_counter") + if exception_counter: + exception_counter.add(1, attributes=attributes or {}) + + def record_choices(self, count: int, attributes: Optional[Dict[str, Any]] = None): + """Record number of choices returned.""" + choice_counter = self.metrics.get("choice_counter") + if choice_counter: + choice_counter.add(count, attributes=attributes or {}) diff --git a/agentops/instrumentation/common/span_management.py b/agentops/instrumentation/common/span_management.py new file mode 100644 index 000000000..9d3f4b26d --- /dev/null +++ b/agentops/instrumentation/common/span_management.py @@ -0,0 +1,174 @@ +"""Common span management utilities for AgentOps instrumentation. + +This module provides utilities for creating and managing spans with +consistent attributes and error handling. 
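+
+Example (illustrative sketch): using the ``timed_span`` decorator defined in
+this module. The ``tracer`` and the duration callback below are placeholders,
+not part of this module::
+
+    from opentelemetry.trace import get_tracer
+
+    tracer = get_tracer(__name__)
+
+    @timed_span(tracer, "my.timed.operation", record_duration=lambda s: print(s))
+    def do_work():
+        return 42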
+""" + +import time +from contextlib import contextmanager +from typing import Optional, Dict, Any, Callable, Tuple +from functools import wraps + +from opentelemetry.trace import Tracer, Span, SpanKind, Status, StatusCode, get_current_span +from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT +from opentelemetry import context as context_api + +from agentops.logging import logger +from agentops.semconv import CoreAttributes +from agentops import get_client + + +class SpanAttributeManager: + """Manages common span attributes across instrumentations.""" + + def __init__(self, service_name: str = "agentops", deployment_environment: str = "production"): + self.service_name = service_name + self.deployment_environment = deployment_environment + + def set_common_attributes(self, span: Span): + """Set common attributes on a span.""" + span.set_attribute(TELEMETRY_SDK_NAME, "agentops") + span.set_attribute(SERVICE_NAME, self.service_name) + span.set_attribute(DEPLOYMENT_ENVIRONMENT, self.deployment_environment) + + def set_config_tags(self, span: Span): + """Set tags from AgentOps config on a span.""" + config = get_client().config + if config.default_tags and len(config.default_tags) > 0: + tag_list = list(config.default_tags) + span.set_attribute(CoreAttributes.TAGS, tag_list) + + +@contextmanager +def create_span( + tracer: Tracer, + name: str, + kind: SpanKind = SpanKind.CLIENT, + attributes: Optional[Dict[str, Any]] = None, + set_common_attributes: bool = True, + attribute_manager: Optional[SpanAttributeManager] = None, +): + """Context manager for creating spans with consistent error handling. + + Args: + tracer: The tracer to use for creating the span + name: The name of the span + kind: The kind of span to create + attributes: Initial attributes to set on the span + set_common_attributes: Whether to set common attributes + attribute_manager: Optional attribute manager for setting common attributes + + Yields: + The created span + """ + with tracer.start_as_current_span(name, kind=kind, attributes=attributes or {}) as span: + try: + if set_common_attributes and attribute_manager: + attribute_manager.set_common_attributes(span) + yield span + span.set_status(Status(StatusCode.OK)) + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + logger.error(f"Error in span {name}: {e}") + raise + + +def timed_span(tracer: Tracer, name: str, record_duration: Optional[Callable[[float], None]] = None, **span_kwargs): + """Decorator for creating timed spans around functions. 
+ + Args: + tracer: The tracer to use + name: The name of the span + record_duration: Optional callback to record duration + **span_kwargs: Additional arguments for span creation + """ + + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + start_time = time.time() + with create_span(tracer, name, **span_kwargs): + result = func(*args, **kwargs) + if record_duration: + duration = time.time() - start_time + record_duration(duration) + return result + + return wrapper + + return decorator + + +class StreamingSpanManager: + """Manages spans for streaming operations.""" + + def __init__(self, tracer: Tracer): + self.tracer = tracer + self._active_spans: Dict[Any, Span] = {} + + def start_streaming_span(self, stream_id: Any, name: str, **span_kwargs) -> Span: + """Start a span for a streaming operation.""" + span = self.tracer.start_span(name, **span_kwargs) + self._active_spans[stream_id] = span + return span + + def get_streaming_span(self, stream_id: Any) -> Optional[Span]: + """Get an active streaming span.""" + return self._active_spans.get(stream_id) + + def end_streaming_span(self, stream_id: Any, status: Optional[Status] = None): + """End a streaming span.""" + span = self._active_spans.pop(stream_id, None) + if span: + if status: + span.set_status(status) + else: + span.set_status(Status(StatusCode.OK)) + span.end() + + +def extract_parent_context(parent_span: Optional[Span] = None) -> Any: + """Extract parent context for span creation. + + Args: + parent_span: Optional parent span to use + + Returns: + Context to use as parent + """ + if parent_span: + from opentelemetry.trace import set_span_in_context + + return set_span_in_context(parent_span) + return context_api.get_current() + + +def safe_set_attribute(span: Span, key: str, value: Any, max_length: int = 1000): + """Safely set an attribute on a span, handling None values and truncating long strings.""" + if value is None: + return + + if isinstance(value, str) and len(value) > max_length: + value = value[: max_length - 3] + "..." + + try: + span.set_attribute(key, value) + except Exception as e: + logger.debug(f"Failed to set span attribute {key}: {e}") + + +def get_span_context_info(span: Optional[Span] = None) -> Tuple[str, str]: + """Get trace and span IDs from a span for debugging. + + Returns: + Tuple of (trace_id, span_id) as strings + """ + if not span: + span = get_current_span() + + span_context = span.get_span_context() + trace_id = format(span_context.trace_id, "032x") if span_context.trace_id else "unknown" + span_id = format(span_context.span_id, "016x") if span_context.span_id else "unknown" + + return trace_id, span_id diff --git a/agentops/instrumentation/common/streaming.py b/agentops/instrumentation/common/streaming.py new file mode 100644 index 000000000..eaf320558 --- /dev/null +++ b/agentops/instrumentation/common/streaming.py @@ -0,0 +1,218 @@ +"""Common streaming utilities for handling streaming responses. + +This module provides utilities for instrumenting streaming API responses +in a consistent way across different providers. 
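+
+Example (illustrative sketch): wrapping an already-started span and stream with
+``SyncStreamWrapper``. The ``tracer``, ``raw_stream`` and chunk layout below are
+placeholders, not part of this module::
+
+    span = tracer.start_span("my.stream")
+    stream = SyncStreamWrapper(
+        raw_stream,
+        span,
+        extract_chunk_content=StreamingResponseHandler.extract_generic_chunk_content,
+    )
+    for chunk in stream:
+        ...  # the span is finalized automatically when iteration completes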
+""" + +from typing import Optional, Any, Dict, Callable +from abc import ABC +import time + +from opentelemetry.trace import Tracer, Span, Status, StatusCode + +from agentops.logging import logger +from agentops.instrumentation.common.span_management import safe_set_attribute +from agentops.instrumentation.common.token_counting import TokenUsage, TokenUsageExtractor + + +class BaseStreamWrapper(ABC): + """Base class for wrapping streaming responses.""" + + def __init__( + self, + stream: Any, + span: Span, + extract_chunk_content: Callable[[Any], Optional[str]], + extract_chunk_attributes: Optional[Callable[[Any], Dict[str, Any]]] = None, + ): + self.stream = stream + self.span = span + self.extract_chunk_content = extract_chunk_content + self.extract_chunk_attributes = extract_chunk_attributes or (lambda x: {}) + + self.start_time = time.time() + self.first_token_time: Optional[float] = None + self.chunks_received = 0 + self.accumulated_content = [] + self.token_usage = TokenUsage() + + def _process_chunk(self, chunk: Any): + """Process a single chunk from the stream.""" + # Record time to first token + if self.first_token_time is None: + self.first_token_time = time.time() + time_to_first_token = self.first_token_time - self.start_time + safe_set_attribute(self.span, "streaming.time_to_first_token", time_to_first_token) + + self.chunks_received += 1 + + # Extract content from chunk + content = self.extract_chunk_content(chunk) + if content: + self.accumulated_content.append(content) + + # Extract and set additional attributes + attributes = self.extract_chunk_attributes(chunk) + for key, value in attributes.items(): + safe_set_attribute(self.span, key, value) + + # Try to extract token usage if available + if hasattr(chunk, "usage") or hasattr(chunk, "usage_metadata"): + chunk_usage = TokenUsageExtractor.extract_from_response(chunk) + # Accumulate token counts + if chunk_usage.prompt_tokens: + self.token_usage.prompt_tokens = chunk_usage.prompt_tokens + if chunk_usage.completion_tokens: + self.token_usage.completion_tokens = ( + self.token_usage.completion_tokens or 0 + ) + chunk_usage.completion_tokens + + def _finalize(self): + """Finalize the stream processing.""" + try: + # Set final content + final_content = "".join(self.accumulated_content) + safe_set_attribute(self.span, "streaming.final_content", final_content) + safe_set_attribute(self.span, "streaming.chunk_count", self.chunks_received) + + # Set timing metrics + total_time = time.time() - self.start_time + safe_set_attribute(self.span, "streaming.total_duration", total_time) + + if self.first_token_time: + generation_time = time.time() - self.first_token_time + safe_set_attribute(self.span, "streaming.generation_duration", generation_time) + + # Set token usage + for attr_name, value in self.token_usage.to_attributes().items(): + safe_set_attribute(self.span, attr_name, value) + + self.span.set_status(Status(StatusCode.OK)) + except Exception as e: + logger.error(f"Error finalizing stream: {e}") + self.span.set_status(Status(StatusCode.ERROR, str(e))) + self.span.record_exception(e) + finally: + self.span.end() + + +class SyncStreamWrapper(BaseStreamWrapper): + """Wrapper for synchronous streaming responses.""" + + def __iter__(self): + try: + for chunk in self.stream: + self._process_chunk(chunk) + yield chunk + except Exception as e: + self.span.set_status(Status(StatusCode.ERROR, str(e))) + self.span.record_exception(e) + raise + finally: + self._finalize() + + +class AsyncStreamWrapper(BaseStreamWrapper): + """Wrapper for 
asynchronous streaming responses.""" + + async def __aiter__(self): + try: + async for chunk in self.stream: + self._process_chunk(chunk) + yield chunk + except Exception as e: + self.span.set_status(Status(StatusCode.ERROR, str(e))) + self.span.record_exception(e) + raise + finally: + self._finalize() + + +def create_stream_wrapper_factory( + tracer: Tracer, + span_name: str, + extract_chunk_content: Callable[[Any], Optional[str]], + extract_chunk_attributes: Optional[Callable[[Any], Dict[str, Any]]] = None, + initial_attributes: Optional[Dict[str, Any]] = None, +) -> Callable: + """Create a factory function for wrapping streaming methods. + + Args: + tracer: The tracer to use for creating spans + span_name: Name for the streaming span + extract_chunk_content: Function to extract content from chunks + extract_chunk_attributes: Optional function to extract attributes from chunks + initial_attributes: Initial attributes to set on the span + + Returns: + A wrapper function suitable for use with wrapt + """ + + def wrapper(wrapped, instance, args, kwargs): + # Start the span + span = tracer.start_span(span_name) + + # Set initial attributes + if initial_attributes: + for key, value in initial_attributes.items(): + safe_set_attribute(span, key, value) + + try: + # Call the wrapped method + stream = wrapped(*args, **kwargs) + + # Determine if it's async or sync + if hasattr(stream, "__aiter__"): + return AsyncStreamWrapper(stream, span, extract_chunk_content, extract_chunk_attributes) + else: + return SyncStreamWrapper(stream, span, extract_chunk_content, extract_chunk_attributes) + except Exception as e: + span.set_status(Status(StatusCode.ERROR, str(e))) + span.record_exception(e) + span.end() + raise + + return wrapper + + +class StreamingResponseHandler: + """Handles common patterns for streaming responses.""" + + @staticmethod + def extract_openai_chunk_content(chunk: Any) -> Optional[str]: + """Extract content from OpenAI-style streaming chunks.""" + if hasattr(chunk, "choices") and chunk.choices: + delta = getattr(chunk.choices[0], "delta", None) + if delta and hasattr(delta, "content"): + return delta.content + return None + + @staticmethod + def extract_anthropic_chunk_content(chunk: Any) -> Optional[str]: + """Extract content from Anthropic-style streaming chunks.""" + if hasattr(chunk, "type"): + if chunk.type == "content_block_delta": + if hasattr(chunk, "delta") and hasattr(chunk.delta, "text"): + return chunk.delta.text + elif chunk.type == "message_delta": + if hasattr(chunk, "delta") and hasattr(chunk.delta, "content"): + return chunk.delta.content + return None + + @staticmethod + def extract_generic_chunk_content(chunk: Any) -> Optional[str]: + """Extract content from generic streaming chunks.""" + # Try common patterns + if hasattr(chunk, "content"): + return str(chunk.content) + elif hasattr(chunk, "text"): + return str(chunk.text) + elif hasattr(chunk, "delta"): + delta = chunk.delta + if hasattr(delta, "content"): + return str(delta.content) + elif hasattr(delta, "text"): + return str(delta.text) + elif isinstance(chunk, str): + return chunk + return None diff --git a/agentops/instrumentation/common/token_counting.py b/agentops/instrumentation/common/token_counting.py new file mode 100644 index 000000000..c467a78c9 --- /dev/null +++ b/agentops/instrumentation/common/token_counting.py @@ -0,0 +1,173 @@ +"""Common token counting and usage extraction utilities. 
+ +This module provides utilities for extracting and recording token usage +information from various response formats. +""" + +from typing import Dict, Any, Optional +from dataclasses import dataclass + +from agentops.logging import logger +from agentops.semconv import SpanAttributes + + +@dataclass +class TokenUsage: + """Represents token usage information.""" + + prompt_tokens: Optional[int] = None + completion_tokens: Optional[int] = None + total_tokens: Optional[int] = None + cached_prompt_tokens: Optional[int] = None + cached_read_tokens: Optional[int] = None + reasoning_tokens: Optional[int] = None + + def to_attributes(self) -> Dict[str, int]: + """Convert to span attributes dictionary.""" + attributes = {} + + if self.prompt_tokens is not None: + attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = self.prompt_tokens + + if self.completion_tokens is not None: + attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = self.completion_tokens + + if self.total_tokens is not None: + attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = self.total_tokens + + if self.cached_prompt_tokens is not None: + attributes[SpanAttributes.LLM_USAGE_CACHE_CREATION_INPUT_TOKENS] = self.cached_prompt_tokens + + if self.cached_read_tokens is not None: + attributes[SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS] = self.cached_read_tokens + + if self.reasoning_tokens is not None: + attributes[SpanAttributes.LLM_USAGE_REASONING_TOKENS] = self.reasoning_tokens + + return attributes + + +class TokenUsageExtractor: + """Extracts token usage from various response formats.""" + + @staticmethod + def extract_from_response(response: Any) -> TokenUsage: + """Extract token usage from a generic response object. + + Handles various response formats from different providers. + """ + usage = TokenUsage() + + # Try direct usage attribute + if hasattr(response, "usage"): + usage_data = response.usage + usage = TokenUsageExtractor._extract_from_usage_object(usage_data) + + # Try usage_metadata (Anthropic style) + elif hasattr(response, "usage_metadata"): + usage_data = response.usage_metadata + usage = TokenUsageExtractor._extract_from_usage_object(usage_data) + + # Try token_usage attribute (CrewAI style) + elif hasattr(response, "token_usage"): + usage = TokenUsageExtractor._extract_from_crewai_format(response.token_usage) + + # Try direct attributes on response + elif hasattr(response, "prompt_tokens") or hasattr(response, "completion_tokens"): + usage = TokenUsageExtractor._extract_from_attributes(response) + + return usage + + @staticmethod + def _extract_from_usage_object(usage_data: Any) -> TokenUsage: + """Extract from a usage object with standard attributes.""" + if not usage_data: + return TokenUsage() + + return TokenUsage( + prompt_tokens=getattr(usage_data, "prompt_tokens", None), + completion_tokens=getattr(usage_data, "completion_tokens", None), + total_tokens=getattr(usage_data, "total_tokens", None), + cached_prompt_tokens=getattr(usage_data, "cached_prompt_tokens", None), + cached_read_tokens=getattr(usage_data, "cache_read_input_tokens", None), + reasoning_tokens=getattr(usage_data, "reasoning_tokens", None), + ) + + @staticmethod + def _extract_from_crewai_format(token_usage_str: str) -> TokenUsage: + """Extract from CrewAI's string format (e.g., 'prompt_tokens=100 completion_tokens=50').""" + usage = TokenUsage() + + try: + metrics = {} + for item in str(token_usage_str).split(): + if "=" in item: + key, value = item.split("=") + try: + metrics[key] = int(value) + except ValueError: + pass + + 
usage.prompt_tokens = metrics.get("prompt_tokens") + usage.completion_tokens = metrics.get("completion_tokens") + usage.total_tokens = metrics.get("total_tokens") + usage.cached_prompt_tokens = metrics.get("cached_prompt_tokens") + + except Exception as e: + logger.debug(f"Failed to parse CrewAI token usage: {e}") + + return usage + + @staticmethod + def _extract_from_attributes(response: Any) -> TokenUsage: + """Extract from direct attributes on the response.""" + return TokenUsage( + prompt_tokens=getattr(response, "prompt_tokens", None), + completion_tokens=getattr(response, "completion_tokens", None), + total_tokens=getattr(response, "total_tokens", None), + ) + + +def calculate_token_efficiency(usage: TokenUsage) -> Optional[float]: + """Calculate token efficiency ratio (completion/prompt). + + Returns: + Efficiency ratio or None if cannot be calculated + """ + if usage.prompt_tokens and usage.completion_tokens and usage.prompt_tokens > 0: + return usage.completion_tokens / usage.prompt_tokens + return None + + +def calculate_cache_efficiency(usage: TokenUsage) -> Optional[float]: + """Calculate cache efficiency ratio (cached/total prompt). + + Returns: + Cache ratio or None if cannot be calculated + """ + if usage.prompt_tokens and usage.cached_prompt_tokens and usage.prompt_tokens > 0: + return usage.cached_prompt_tokens / usage.prompt_tokens + return None + + +def set_token_usage_attributes(span: Any, response: Any): + """Extract and set token usage attributes on a span. + + Args: + span: The span to set attributes on + response: The response object to extract usage from + """ + usage = TokenUsageExtractor.extract_from_response(response) + + # Set basic token attributes + for attr_name, value in usage.to_attributes().items(): + span.set_attribute(attr_name, value) + + # Calculate and set efficiency metrics + efficiency = calculate_token_efficiency(usage) + if efficiency is not None: + span.set_attribute("llm.token_efficiency", f"{efficiency:.4f}") + + cache_efficiency = calculate_cache_efficiency(usage) + if cache_efficiency is not None: + span.set_attribute("llm.cache_efficiency", f"{cache_efficiency:.4f}") diff --git a/agentops/instrumentation/concurrent_futures/instrumentation.py b/agentops/instrumentation/concurrent_futures/instrumentation.py index 71c9b50f0..36771069e 100644 --- a/agentops/instrumentation/concurrent_futures/instrumentation.py +++ b/agentops/instrumentation/concurrent_futures/instrumentation.py @@ -7,12 +7,12 @@ import contextvars import functools -from typing import Any, Callable, Collection, Optional, Tuple, TypeVar +from typing import Any, Callable, Collection, Optional, Tuple, TypeVar, List, Dict from concurrent.futures import ThreadPoolExecutor, Future -from opentelemetry.instrumentation.instrumentor import BaseInstrumentor - +from agentops.instrumentation.common import BaseAgentOpsInstrumentor, InstrumentorConfig +from agentops.instrumentation.common.wrappers import WrapConfig from agentops.logging import logger # Store original methods to restore during uninstrumentation @@ -95,7 +95,7 @@ def wrapped_submit(self: ThreadPoolExecutor, func: Callable[..., R], *args: Any, return wrapped_submit -class ConcurrentFuturesInstrumentor(BaseInstrumentor): +class ConcurrentFuturesInstrumentor(BaseAgentOpsInstrumentor): """ Instrumentor for concurrent.futures module. @@ -104,40 +104,78 @@ class ConcurrentFuturesInstrumentor(BaseInstrumentor): instrumented operations maintain proper trace context. 
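+
+    Example (illustrative sketch; ``tracer`` and ``work`` are placeholders)::
+
+        with tracer.start_as_current_span("parent"):
+            with ThreadPoolExecutor() as pool:
+                # the submitted callable sees "parent" as the active span
+                pool.submit(work).result()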
""" + def __init__(self): + """Initialize the concurrent.futures instrumentor.""" + config = InstrumentorConfig( + library_name="agentops.instrumentation.concurrent_futures", + library_version="0.1.0", + wrapped_methods=[], # We handle wrapping manually + metrics_enabled=False, # No metrics needed for context propagation + dependencies=[], + ) + super().__init__(config) + self._original_init = None + self._original_submit = None + def instrumentation_dependencies(self) -> Collection[str]: """Return a list of instrumentation dependencies.""" return [] + def _get_wrapped_methods(self) -> List[WrapConfig]: + """ + Return list of methods to be wrapped. + + For concurrent_futures, we don't use the standard wrapping mechanism + since we're patching methods directly for context propagation. + """ + return [] + + def _create_metrics(self, meter) -> Dict[str, Any]: + """ + Create metrics for this instrumentor. + + This instrumentor doesn't need metrics as it's purely for context propagation. + + Args: + meter: The meter instance (unused) + + Returns: + Empty dict since no metrics are needed + """ + return {} + def _instrument(self, **kwargs: Any) -> None: """Instrument the concurrent.futures module.""" - global _original_init, _original_submit + # Note: We don't call super()._instrument() here because we're not using + # the standard wrapping mechanism for this special instrumentor logger.debug("[ConcurrentFuturesInstrumentor] Starting instrumentation") # Store original methods - _original_init = ThreadPoolExecutor.__init__ - _original_submit = ThreadPoolExecutor.submit + self._original_init = ThreadPoolExecutor.__init__ + self._original_submit = ThreadPoolExecutor.submit # Patch ThreadPoolExecutor methods - ThreadPoolExecutor.__init__ = _context_propagating_init(_original_init) - ThreadPoolExecutor.submit = _context_propagating_submit(_original_submit) + ThreadPoolExecutor.__init__ = _context_propagating_init(self._original_init) + ThreadPoolExecutor.submit = _context_propagating_submit(self._original_submit) logger.info("[ConcurrentFuturesInstrumentor] Successfully instrumented concurrent.futures.ThreadPoolExecutor") def _uninstrument(self, **kwargs: Any) -> None: """Uninstrument the concurrent.futures module.""" - global _original_init, _original_submit + # Note: We don't call super()._uninstrument() here because we're not using + # the standard wrapping mechanism for this special instrumentor logger.debug("[ConcurrentFuturesInstrumentor] Starting uninstrumentation") # Restore original methods - if _original_init: - ThreadPoolExecutor.__init__ = _original_init - _original_init = None + if self._original_init: + ThreadPoolExecutor.__init__ = self._original_init + self._original_init = None - if _original_submit: - ThreadPoolExecutor.submit = _original_submit - _original_submit = None + if self._original_submit: + ThreadPoolExecutor.submit = self._original_submit + self._original_submit = None logger.info("[ConcurrentFuturesInstrumentor] Successfully uninstrumented concurrent.futures.ThreadPoolExecutor") diff --git a/agentops/instrumentation/crewai/instrumentation.py b/agentops/instrumentation/crewai/instrumentation.py index d26fa2a8e..dd0fc69f8 100644 --- a/agentops/instrumentation/crewai/instrumentation.py +++ b/agentops/instrumentation/crewai/instrumentation.py @@ -1,18 +1,26 @@ import os import time import logging -from typing import Collection +from typing import Dict, Any from contextlib import contextmanager -from wrapt import wrap_function_wrapper -from opentelemetry.trace import SpanKind, 
get_tracer, Tracer, get_current_span -from opentelemetry.trace.status import Status, StatusCode -from opentelemetry.metrics import Histogram, Meter, get_meter +from opentelemetry.trace import SpanKind, get_current_span +from opentelemetry.metrics import Meter from opentelemetry.instrumentation.utils import unwrap -from opentelemetry.instrumentation.instrumentor import BaseInstrumentor -from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT + +from agentops.instrumentation.common import ( + BaseAgentOpsInstrumentor, + InstrumentorConfig, + StandardMetrics, + create_wrapper_factory, + create_span, + SpanAttributeManager, + safe_set_attribute, + set_token_usage_attributes, + TokenUsageExtractor, +) from agentops.instrumentation.crewai.version import __version__ -from agentops.semconv import SpanAttributes, AgentOpsSpanKindValues, Meters, ToolAttributes, MessageAttributes +from agentops.semconv import SpanAttributes, AgentOpsSpanKindValues, ToolAttributes, MessageAttributes from agentops.semconv.core import CoreAttributes from agentops.instrumentation.crewai.crewai_span_attributes import CrewAISpanAttributes, set_span_attribute from agentops import get_client @@ -60,62 +68,78 @@ def attach_tool_executions_to_agent_span(span): del _tool_executions_by_agent[span_id] -class CrewAIInstrumentor(BaseInstrumentor): - def instrumentation_dependencies(self) -> Collection[str]: - return _instruments +class CrewAIInstrumentor(BaseAgentOpsInstrumentor): + """Instrumentor for CrewAI framework.""" + + def __init__(self): + config = InstrumentorConfig( + library_name="crewai", + library_version=__version__, + wrapped_methods=[], # We'll use custom wrapping for CrewAI + metrics_enabled=is_metrics_enabled(), + dependencies=_instruments, + ) + super().__init__(config) + self._attribute_manager = None - def _instrument(self, **kwargs): + def _initialize(self, **kwargs): + """Initialize attribute manager.""" application_name = kwargs.get("application_name", "default_application") environment = kwargs.get("environment", "default_environment") - tracer_provider = kwargs.get("tracer_provider") - tracer = get_tracer(__name__, __version__, tracer_provider) - - meter_provider = kwargs.get("meter_provider") - meter = get_meter(__name__, __version__, meter_provider) - - if is_metrics_enabled(): - ( - token_histogram, - duration_histogram, - ) = _create_metrics(meter) - else: - ( - token_histogram, - duration_histogram, - ) = (None, None) + self._attribute_manager = SpanAttributeManager( + service_name=application_name, deployment_environment=environment + ) + + def _create_metrics(self, meter: Meter) -> Dict[str, Any]: + """Create metrics for CrewAI instrumentation.""" + return StandardMetrics.create_standard_metrics(meter) + + def _custom_wrap(self, **kwargs): + """Perform custom wrapping for CrewAI methods.""" + from wrapt import wrap_function_wrapper + # Get attribute manager for all wrappers + attr_manager = self._attribute_manager + + # Define wrappers using the new create_wrapper_factory wrap_function_wrapper( "crewai.crew", "Crew.kickoff", - wrap_kickoff(tracer, duration_histogram, token_histogram, environment, application_name), + create_wrapper_factory(wrap_kickoff_impl, self._metrics, attr_manager)(self._tracer), ) + wrap_function_wrapper( "crewai.agent", "Agent.execute_task", - wrap_agent_execute_task(tracer, duration_histogram, token_histogram, environment, application_name), + create_wrapper_factory(wrap_agent_execute_task_impl, self._metrics, 
attr_manager)(self._tracer), ) + wrap_function_wrapper( "crewai.task", "Task.execute_sync", - wrap_task_execute(tracer, duration_histogram, token_histogram, environment, application_name), + create_wrapper_factory(wrap_task_execute_impl, self._metrics, attr_manager)(self._tracer), ) + wrap_function_wrapper( "crewai.llm", "LLM.call", - wrap_llm_call(tracer, duration_histogram, token_histogram, environment, application_name), + create_wrapper_factory(wrap_llm_call_impl, self._metrics, attr_manager)(self._tracer), ) wrap_function_wrapper( "crewai.utilities.tool_utils", "execute_tool_and_check_finality", - wrap_tool_execution(tracer, duration_histogram, environment, application_name), + create_wrapper_factory(wrap_tool_execution_impl, self._metrics, attr_manager)(self._tracer), ) wrap_function_wrapper( - "crewai.tools.tool_usage", "ToolUsage.use", wrap_tool_usage(tracer, environment, application_name) + "crewai.tools.tool_usage", + "ToolUsage.use", + create_wrapper_factory(wrap_tool_usage_impl, self._metrics, attr_manager)(self._tracer), ) - def _uninstrument(self, **kwargs): + def _custom_unwrap(self, **kwargs): + """Perform custom unwrapping for CrewAI methods.""" unwrap("crewai.crew", "Crew.kickoff") unwrap("crewai.agent", "Agent.execute_task") unwrap("crewai.task", "Task.execute_sync") @@ -124,40 +148,9 @@ def _uninstrument(self, **kwargs): unwrap("crewai.tools.tool_usage", "ToolUsage.use") -def with_tracer_wrapper(func): - """Helper for providing tracer for wrapper functions.""" - - def _with_tracer(tracer, duration_histogram, token_histogram, environment, application_name): - def wrapper(wrapped, instance, args, kwargs): - return func( - tracer, - duration_histogram, - token_histogram, - environment, - application_name, - wrapped, - instance, - args, - kwargs, - ) - - return wrapper - - return _with_tracer - - -@with_tracer_wrapper -def wrap_kickoff( - tracer: Tracer, - duration_histogram: Histogram, - token_histogram: Histogram, - environment, - application_name, - wrapped, - instance, - args, - kwargs, -): +# Implementation functions for wrappers +def wrap_kickoff_impl(tracer, metrics, attr_manager, wrapped, instance, args, kwargs): + """Implementation of kickoff wrapper.""" logger.debug( f"CrewAI: Starting workflow instrumentation for Crew with {len(getattr(instance, 'agents', []))} agents" ) @@ -174,201 +167,177 @@ def wrap_kickoff( # Use trace_name from config if available, otherwise default to "crewai.workflow" span_name = config.trace_name if config.trace_name else "crewai.workflow" - with tracer.start_as_current_span( - span_name, - kind=SpanKind.INTERNAL, - attributes=attributes, + with create_span( + tracer, span_name, kind=SpanKind.INTERNAL, attributes=attributes, attribute_manager=attr_manager ) as span: - try: - span.set_attribute(TELEMETRY_SDK_NAME, "agentops") - span.set_attribute(SERVICE_NAME, application_name) - span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment) + logger.debug("CrewAI: Processing crew instance attributes") - logger.debug("CrewAI: Processing crew instance attributes") + # First set general crew attributes but skip agent processing + crew_attrs = CrewAISpanAttributes(span=span, instance=instance, skip_agent_processing=True) - # First set general crew attributes but skip agent processing - crew_attrs = CrewAISpanAttributes(span=span, instance=instance, skip_agent_processing=True) + # Prioritize agent processing before task execution + if hasattr(instance, "agents") and instance.agents: + logger.debug(f"CrewAI: Explicitly processing 
{len(instance.agents)} agents before task execution") + crew_attrs._parse_agents(instance.agents) - # Prioritize agent processing before task execution - if hasattr(instance, "agents") and instance.agents: - logger.debug(f"CrewAI: Explicitly processing {len(instance.agents)} agents before task execution") - crew_attrs._parse_agents(instance.agents) + logger.debug("CrewAI: Executing wrapped crew kickoff function") + result = wrapped(*args, **kwargs) - logger.debug("CrewAI: Executing wrapped crew kickoff function") - result = wrapped(*args, **kwargs) + if result: + class_name = instance.__class__.__name__ + span.set_attribute(f"crewai.{class_name.lower()}.result", str(result)) - if result: - class_name = instance.__class__.__name__ - span.set_attribute(f"crewai.{class_name.lower()}.result", str(result)) - span.set_status(Status(StatusCode.OK)) - if class_name == "Crew": - if hasattr(result, "usage_metrics"): - span.set_attribute("crewai.crew.usage_metrics", str(getattr(result, "usage_metrics"))) - - if hasattr(result, "tasks_output") and result.tasks_output: - span.set_attribute("crewai.crew.tasks_output", str(result.tasks_output)) - - try: - task_details_by_description = {} - if hasattr(instance, "tasks"): - for task in instance.tasks: - if task is not None: - agent_id = "" - agent_role = "" - if hasattr(task, "agent") and task.agent: - agent_id = str(getattr(task.agent, "id", "")) - agent_role = getattr(task.agent, "role", "") - - tools = [] - if hasattr(task, "tools") and task.tools: - for tool in task.tools: - tool_info = {} - if hasattr(tool, "name"): - tool_info["name"] = tool.name - if hasattr(tool, "description"): - tool_info["description"] = tool.description - if tool_info: - tools.append(tool_info) - - task_details_by_description[task.description] = { - "agent_id": agent_id, - "agent_role": agent_role, - "async_execution": getattr(task, "async_execution", False), - "human_input": getattr(task, "human_input", False), - "output_file": getattr(task, "output_file", ""), - "tools": tools, - } - - for idx, task_output in enumerate(result.tasks_output): - task_prefix = f"crewai.crew.tasks.{idx}" - - task_attrs = { - "description": getattr(task_output, "description", ""), - "name": getattr(task_output, "name", ""), - "expected_output": getattr(task_output, "expected_output", ""), - "summary": getattr(task_output, "summary", ""), - "raw": getattr(task_output, "raw", ""), - "agent": getattr(task_output, "agent", ""), - "output_format": str(getattr(task_output, "output_format", "")), - } - - for attr_name, attr_value in task_attrs.items(): - if attr_value: - if attr_name == "raw" and len(str(attr_value)) > 1000: - attr_value = str(attr_value)[:997] + "..." 
- span.set_attribute(f"{task_prefix}.{attr_name}", str(attr_value)) - - span.set_attribute(f"{task_prefix}.status", "completed") - span.set_attribute(f"{task_prefix}.id", str(idx)) - - description = task_attrs.get("description", "") - if description and description in task_details_by_description: - details = task_details_by_description[description] - - span.set_attribute(f"{task_prefix}.agent_id", details["agent_id"]) - span.set_attribute( - f"{task_prefix}.async_execution", str(details["async_execution"]) - ) - span.set_attribute(f"{task_prefix}.human_input", str(details["human_input"])) - - if details["output_file"]: - span.set_attribute(f"{task_prefix}.output_file", details["output_file"]) - - for tool_idx, tool in enumerate(details["tools"]): - for tool_key, tool_value in tool.items(): - span.set_attribute( - f"{task_prefix}.tools.{tool_idx}.{tool_key}", str(tool_value) - ) - except Exception as ex: - logger.warning(f"Failed to parse task outputs: {ex}") - - if hasattr(result, "token_usage"): - token_usage = str(getattr(result, "token_usage")) - span.set_attribute("crewai.crew.token_usage", token_usage) - - try: - metrics = {} - for item in token_usage.split(): - if "=" in item: - key, value = item.split("=") - try: - metrics[key] = int(value) - except ValueError: - metrics[key] = value - - if "total_tokens" in metrics: - span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, metrics["total_tokens"]) - if "prompt_tokens" in metrics: - span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, metrics["prompt_tokens"]) - if "completion_tokens" in metrics: - span.set_attribute( - SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, metrics["completion_tokens"] - ) - if "cached_prompt_tokens" in metrics: - span.set_attribute( - SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS, metrics["cached_prompt_tokens"] - ) - if "successful_requests" in metrics: - span.set_attribute("crewai.crew.successful_requests", metrics["successful_requests"]) - - if ( - "prompt_tokens" in metrics - and "completion_tokens" in metrics - and metrics["prompt_tokens"] > 0 - ): - efficiency = metrics["completion_tokens"] / metrics["prompt_tokens"] - span.set_attribute("crewai.crew.token_efficiency", f"{efficiency:.4f}") - - if ( - "cached_prompt_tokens" in metrics - and "prompt_tokens" in metrics - and metrics["prompt_tokens"] > 0 - ): - cache_ratio = metrics["cached_prompt_tokens"] / metrics["prompt_tokens"] - span.set_attribute("crewai.crew.cache_efficiency", f"{cache_ratio:.4f}") - except Exception as ex: - logger.warning(f"Failed to parse token usage metrics: {ex}") - return result + if class_name == "Crew": + _process_crew_result(span, instance, result) + + # Set token usage using common utilities + set_token_usage_attributes(span, result) + _calculate_efficiency_metrics(span, result) + + return result + + +def _process_crew_result(span, instance, result): + """Process crew execution result.""" + if hasattr(result, "usage_metrics"): + span.set_attribute("crewai.crew.usage_metrics", str(getattr(result, "usage_metrics"))) + + if hasattr(result, "tasks_output") and result.tasks_output: + span.set_attribute("crewai.crew.tasks_output", str(result.tasks_output)) + + try: + task_details_by_description = _build_task_details_map(instance) + _process_task_outputs(span, result.tasks_output, task_details_by_description) except Exception as ex: - span.set_status(Status(StatusCode.ERROR, str(ex))) - logger.error("Error in trace creation: %s", ex) - raise + logger.warning(f"Failed to parse task outputs: {ex}") + + +def 
_build_task_details_map(instance): + """Build a map of task descriptions to task details.""" + task_details_by_description = {} + if hasattr(instance, "tasks"): + for task in instance.tasks: + if task is not None: + agent_id = "" + agent_role = "" + if hasattr(task, "agent") and task.agent: + agent_id = str(getattr(task.agent, "id", "")) + agent_role = getattr(task.agent, "role", "") + + tools = [] + if hasattr(task, "tools") and task.tools: + for tool in task.tools: + tool_info = {} + if hasattr(tool, "name"): + tool_info["name"] = tool.name + if hasattr(tool, "description"): + tool_info["description"] = tool.description + if tool_info: + tools.append(tool_info) + + task_details_by_description[task.description] = { + "agent_id": agent_id, + "agent_role": agent_role, + "async_execution": getattr(task, "async_execution", False), + "human_input": getattr(task, "human_input", False), + "output_file": getattr(task, "output_file", ""), + "tools": tools, + } + return task_details_by_description + + +def _process_task_outputs(span, tasks_output, task_details_by_description): + """Process task outputs and set attributes.""" + for idx, task_output in enumerate(tasks_output): + task_prefix = f"crewai.crew.tasks.{idx}" + + task_attrs = { + "description": getattr(task_output, "description", ""), + "name": getattr(task_output, "name", ""), + "expected_output": getattr(task_output, "expected_output", ""), + "summary": getattr(task_output, "summary", ""), + "raw": getattr(task_output, "raw", ""), + "agent": getattr(task_output, "agent", ""), + "output_format": str(getattr(task_output, "output_format", "")), + } + + for attr_name, attr_value in task_attrs.items(): + if attr_value: + safe_set_attribute(span, f"{task_prefix}.{attr_name}", attr_value, max_length=1000) + + span.set_attribute(f"{task_prefix}.status", "completed") + span.set_attribute(f"{task_prefix}.id", str(idx)) + + description = task_attrs.get("description", "") + if description and description in task_details_by_description: + details = task_details_by_description[description] + + span.set_attribute(f"{task_prefix}.agent_id", details["agent_id"]) + span.set_attribute(f"{task_prefix}.async_execution", str(details["async_execution"])) + span.set_attribute(f"{task_prefix}.human_input", str(details["human_input"])) + + if details["output_file"]: + span.set_attribute(f"{task_prefix}.output_file", details["output_file"]) + + for tool_idx, tool in enumerate(details["tools"]): + for tool_key, tool_value in tool.items(): + span.set_attribute(f"{task_prefix}.tools.{tool_idx}.{tool_key}", str(tool_value)) + + +def _calculate_efficiency_metrics(span, result): + """Calculate and set efficiency metrics.""" + if hasattr(result, "token_usage"): + try: + usage = TokenUsageExtractor.extract_from_response(result) + + # Calculate efficiency + if usage.prompt_tokens and usage.completion_tokens and usage.prompt_tokens > 0: + efficiency = usage.completion_tokens / usage.prompt_tokens + span.set_attribute("crewai.crew.token_efficiency", f"{efficiency:.4f}") + # Calculate cache efficiency + if usage.cached_prompt_tokens and usage.prompt_tokens and usage.prompt_tokens > 0: + cache_ratio = usage.cached_prompt_tokens / usage.prompt_tokens + span.set_attribute("crewai.crew.cache_efficiency", f"{cache_ratio:.4f}") -@with_tracer_wrapper -def wrap_agent_execute_task( - tracer, duration_histogram, token_histogram, environment, application_name, wrapped, instance, args, kwargs -): + except Exception as ex: + logger.warning(f"Failed to calculate efficiency metrics: {ex}") + 
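
Note: `_process_task_outputs` above delegates truncation to `safe_set_attribute`, presumably provided by `agentops.instrumentation.common` but not shown in this patch. A minimal sketch of what such a helper is assumed to do (the name, signature, and behavior here are illustrative assumptions, not the actual implementation):

```python
# Hypothetical sketch of the truncation helper assumed by _process_task_outputs above.
# The real helper is expected to live in agentops.instrumentation.common; its actual
# signature and behavior may differ.
from opentelemetry.trace import Span


def safe_set_attribute(span: Span, key: str, value, max_length: int = 1000) -> None:
    """Set a span attribute, truncating overly long string values."""
    text = str(value)
    if max_length and len(text) > max_length:
        # Reserve three characters for the ellipsis, mirroring the old "[:997] + '...'" logic.
        text = text[: max_length - 3] + "..."
    span.set_attribute(key, text)
```

Under that assumption, `max_length=1000` reproduces the previous inline `[:997] + "..."` cap, which the old code applied only to the `raw` output; the refactored loop applies the same cap uniformly to every task attribute.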
+ +def wrap_agent_execute_task_impl(tracer, metrics, attr_manager, wrapped, instance, args, kwargs): + """Implementation of agent execute task wrapper.""" agent_name = instance.role if hasattr(instance, "role") else "agent" - with tracer.start_as_current_span( + + with create_span( + tracer, f"{agent_name}.agent", kind=SpanKind.CLIENT, attributes={ SpanAttributes.AGENTOPS_SPAN_KIND: AgentOpsSpanKindValues.AGENT.value, }, + attribute_manager=attr_manager, ) as span: - try: - span.set_attribute(TELEMETRY_SDK_NAME, "agentops") - span.set_attribute(SERVICE_NAME, application_name) - span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment) - - CrewAISpanAttributes(span=span, instance=instance) + CrewAISpanAttributes(span=span, instance=instance) - result = wrapped(*args, **kwargs) + result = wrapped(*args, **kwargs) - attach_tool_executions_to_agent_span(span) + attach_tool_executions_to_agent_span(span) - if token_histogram and hasattr(instance, "_token_process"): - token_histogram.record( - instance._token_process.get_summary().prompt_tokens, + # Record token metrics if available + if metrics.get("token_histogram") and hasattr(instance, "_token_process"): + token_process = instance._token_process.get_summary() + if hasattr(token_process, "prompt_tokens"): + metrics["token_histogram"].record( + token_process.prompt_tokens, attributes={ SpanAttributes.LLM_SYSTEM: "crewai", SpanAttributes.LLM_TOKEN_TYPE: "input", SpanAttributes.LLM_RESPONSE_MODEL: str(instance.llm.model), }, ) - token_histogram.record( - instance._token_process.get_summary().completion_tokens, + if hasattr(token_process, "completion_tokens"): + metrics["token_histogram"].record( + token_process.completion_tokens, attributes={ SpanAttributes.LLM_SYSTEM: "crewai", SpanAttributes.LLM_TOKEN_TYPE: "output", @@ -376,22 +345,15 @@ def wrap_agent_execute_task( }, ) - if hasattr(instance, "llm") and hasattr(instance.llm, "model"): - set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, str(instance.llm.model)) - set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, str(instance.llm.model)) + if hasattr(instance, "llm") and hasattr(instance.llm, "model"): + set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, str(instance.llm.model)) + set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, str(instance.llm.model)) - span.set_status(Status(StatusCode.OK)) - return result - except Exception as ex: - span.set_status(Status(StatusCode.ERROR, str(ex))) - logger.error("Error in trace creation: %s", ex) - raise + return result -@with_tracer_wrapper -def wrap_task_execute( - tracer, duration_histogram, token_histogram, environment, application_name, wrapped, instance, args, kwargs -): +def wrap_task_execute_impl(tracer, metrics, attr_manager, wrapped, instance, args, kwargs): + """Implementation of task execute wrapper.""" task_name = instance.description if hasattr(instance, "description") else "task" config = get_client().config @@ -401,245 +363,173 @@ def wrap_task_execute( if config.default_tags and len(config.default_tags) > 0: tag_list = list(config.default_tags) - # TODO: This should be a set to prevent duplicates, but we need to ensure - # that the tags are not modified in place, so we convert to list first. 
attributes[CoreAttributes.TAGS] = tag_list - with tracer.start_as_current_span( - f"{task_name}.task", - kind=SpanKind.CLIENT, - attributes=attributes, + with create_span( + tracer, f"{task_name}.task", kind=SpanKind.CLIENT, attributes=attributes, attribute_manager=attr_manager ) as span: - try: - span.set_attribute(TELEMETRY_SDK_NAME, "agentops") - span.set_attribute(SERVICE_NAME, application_name) - span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment) + CrewAISpanAttributes(span=span, instance=instance) - CrewAISpanAttributes(span=span, instance=instance) + result = wrapped(*args, **kwargs) - result = wrapped(*args, **kwargs) - - set_span_attribute(span, SpanAttributes.AGENTOPS_ENTITY_OUTPUT, str(result)) - span.set_status(Status(StatusCode.OK)) - return result - except Exception as ex: - span.set_status(Status(StatusCode.ERROR, str(ex))) - logger.error("Error in trace creation: %s", ex) - raise + set_span_attribute(span, SpanAttributes.AGENTOPS_ENTITY_OUTPUT, str(result)) + return result -@with_tracer_wrapper -def wrap_llm_call( - tracer, duration_histogram, token_histogram, environment, application_name, wrapped, instance, args, kwargs -): +def wrap_llm_call_impl(tracer, metrics, attr_manager, wrapped, instance, args, kwargs): + """Implementation of LLM call wrapper.""" llm = instance.model if hasattr(instance, "model") else "llm" - with tracer.start_as_current_span(f"{llm}.llm", kind=SpanKind.CLIENT, attributes={}) as span: - start_time = time.time() - try: - span.set_attribute(TELEMETRY_SDK_NAME, "agentops") - span.set_attribute(SERVICE_NAME, application_name) - span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment) - - CrewAISpanAttributes(span=span, instance=instance) - - result = wrapped(*args, **kwargs) + start_time = time.time() + + with create_span(tracer, f"{llm}.llm", kind=SpanKind.CLIENT, attribute_manager=attr_manager) as span: + CrewAISpanAttributes(span=span, instance=instance) + + result = wrapped(*args, **kwargs) + + # Set prompt attributes from args + if args and isinstance(args[0], list): + for i, message in enumerate(args[0]): + if isinstance(message, dict): + if "role" in message: + span.set_attribute(MessageAttributes.PROMPT_ROLE.format(i=i), message["role"]) + if "content" in message: + span.set_attribute(MessageAttributes.PROMPT_CONTENT.format(i=i), message["content"]) + + # Set completion attributes from result + if result: + span.set_attribute(MessageAttributes.COMPLETION_CONTENT.format(i=0), str(result)) + span.set_attribute(MessageAttributes.COMPLETION_ROLE.format(i=0), "assistant") + + # Set token usage attributes from callbacks + if "callbacks" in kwargs and kwargs["callbacks"] and hasattr(kwargs["callbacks"][0], "token_cost_process"): + token_process = kwargs["callbacks"][0].token_cost_process + if hasattr(token_process, "completion_tokens"): + span.set_attribute(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, token_process.completion_tokens) + if hasattr(token_process, "prompt_tokens"): + span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, token_process.prompt_tokens) + if hasattr(token_process, "total_tokens"): + span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, token_process.total_tokens) + + # Record duration metric + if metrics.get("duration_histogram"): + duration = time.time() - start_time + metrics["duration_histogram"].record( + duration, + attributes={ + SpanAttributes.LLM_SYSTEM: "crewai", + SpanAttributes.LLM_RESPONSE_MODEL: str(instance.model), + }, + ) - # Set prompt attributes from args - if args and isinstance(args[0], list): 
- for i, message in enumerate(args[0]): - if isinstance(message, dict): - if "role" in message: - span.set_attribute(MessageAttributes.PROMPT_ROLE.format(i=i), message["role"]) - if "content" in message: - span.set_attribute(MessageAttributes.PROMPT_CONTENT.format(i=i), message["content"]) - - # Set completion attributes from result - if result: - span.set_attribute(MessageAttributes.COMPLETION_CONTENT.format(i=0), str(result)) - span.set_attribute(MessageAttributes.COMPLETION_ROLE.format(i=0), "assistant") - - # Set token usage attributes from callbacks - if "callbacks" in kwargs and kwargs["callbacks"] and hasattr(kwargs["callbacks"][0], "token_cost_process"): - token_process = kwargs["callbacks"][0].token_cost_process - if hasattr(token_process, "completion_tokens"): - span.set_attribute(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, token_process.completion_tokens) - if hasattr(token_process, "prompt_tokens"): - span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, token_process.prompt_tokens) - if hasattr(token_process, "total_tokens"): - span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, token_process.total_tokens) - - if duration_histogram: - duration = time.time() - start_time - duration_histogram.record( - duration, - attributes={ - SpanAttributes.LLM_SYSTEM: "crewai", - SpanAttributes.LLM_RESPONSE_MODEL: str(instance.model), - }, - ) + return result - span.set_status(Status(StatusCode.OK)) - return result - except Exception as ex: - span.set_status(Status(StatusCode.ERROR, str(ex))) - logger.error("Error in trace creation: %s", ex) - raise +def wrap_tool_execution_impl(tracer, metrics, attr_manager, wrapped, instance, args, kwargs): + """Implementation of tool execution wrapper.""" + agent_action = args[0] if args else None + tools = args[1] if len(args) > 1 else [] -def wrap_tool_execution(tracer, duration_histogram, environment, application_name): - """Wrapper for tool execution function.""" + if not agent_action: + return wrapped(*args, **kwargs) - def wrapper(wrapped, instance, args, kwargs): - agent_action = args[0] if args else None - tools = args[1] if len(args) > 1 else [] + tool_name = getattr(agent_action, "tool", "unknown_tool") + tool_input = getattr(agent_action, "tool_input", "") - if not agent_action: - return wrapped(*args, **kwargs) + with store_tool_execution() as tool_details: + tool_details["name"] = tool_name + tool_details["parameters"] = str(tool_input) - tool_name = getattr(agent_action, "tool", "unknown_tool") - tool_input = getattr(agent_action, "tool_input", "") + matching_tool = next((tool for tool in tools if hasattr(tool, "name") and tool.name == tool_name), None) + if matching_tool and hasattr(matching_tool, "description"): + tool_details["description"] = str(matching_tool.description) - with store_tool_execution() as tool_details: - tool_details["name"] = tool_name - tool_details["parameters"] = str(tool_input) + start_time = time.time() - matching_tool = next((tool for tool in tools if hasattr(tool, "name") and tool.name == tool_name), None) + with create_span( + tracer, + f"{tool_name}.tool", + kind=SpanKind.CLIENT, + attributes={ + SpanAttributes.AGENTOPS_SPAN_KIND: "tool", + ToolAttributes.TOOL_NAME: tool_name, + ToolAttributes.TOOL_PARAMETERS: str(tool_input), + }, + attribute_manager=attr_manager, + ) as span: if matching_tool and hasattr(matching_tool, "description"): - tool_details["description"] = str(matching_tool.description) + span.set_attribute(ToolAttributes.TOOL_DESCRIPTION, str(matching_tool.description)) - with 
tracer.start_as_current_span( - f"{tool_name}.tool", - kind=SpanKind.CLIENT, - attributes={ - SpanAttributes.AGENTOPS_SPAN_KIND: "tool", - ToolAttributes.TOOL_NAME: tool_name, - ToolAttributes.TOOL_PARAMETERS: str(tool_input), - }, - ) as span: - start_time = time.time() - try: - span.set_attribute(TELEMETRY_SDK_NAME, "agentops") - span.set_attribute(SERVICE_NAME, application_name) - span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment) - - if matching_tool and hasattr(matching_tool, "description"): - span.set_attribute(ToolAttributes.TOOL_DESCRIPTION, str(matching_tool.description)) - - result = wrapped(*args, **kwargs) - - if duration_histogram: - duration = time.time() - start_time - duration_histogram.record( - duration, - attributes={ - SpanAttributes.LLM_SYSTEM: "crewai", - ToolAttributes.TOOL_NAME: tool_name, - }, - ) - - if hasattr(result, "result"): - tool_result = str(result.result) - span.set_attribute(ToolAttributes.TOOL_RESULT, tool_result) - tool_details["result"] = tool_result + result = wrapped(*args, **kwargs) - tool_status = "success" if not hasattr(result, "error") or not result.error else "error" - span.set_attribute(ToolAttributes.TOOL_STATUS, tool_status) - tool_details["status"] = tool_status + # Record duration metric + if metrics.get("duration_histogram"): + duration = time.time() - start_time + metrics["duration_histogram"].record( + duration, + attributes={ + SpanAttributes.LLM_SYSTEM: "crewai", + ToolAttributes.TOOL_NAME: tool_name, + }, + ) - if hasattr(result, "error") and result.error: - tool_details["error"] = str(result.error) + if hasattr(result, "result"): + tool_result = str(result.result) + span.set_attribute(ToolAttributes.TOOL_RESULT, tool_result) + tool_details["result"] = tool_result - duration = time.time() - start_time - tool_details["duration"] = f"{duration:.3f}" + tool_status = "success" if not hasattr(result, "error") or not result.error else "error" + span.set_attribute(ToolAttributes.TOOL_STATUS, tool_status) + tool_details["status"] = tool_status - span.set_status(Status(StatusCode.OK)) - return result - except Exception as ex: - tool_status = "error" - span.set_attribute(ToolAttributes.TOOL_STATUS, tool_status) - tool_details["status"] = tool_status - tool_details["error"] = str(ex) + if hasattr(result, "error") and result.error: + tool_details["error"] = str(result.error) - span.set_status(Status(StatusCode.ERROR, str(ex))) - logger.error(f"Error in tool execution trace: {ex}") - raise + duration = time.time() - start_time + tool_details["duration"] = f"{duration:.3f}" - return wrapper + return result -def wrap_tool_usage(tracer, environment, application_name): - """Wrapper for ToolUsage.use method.""" +def wrap_tool_usage_impl(tracer, metrics, attr_manager, wrapped, instance, args, kwargs): + """Implementation of tool usage wrapper.""" + calling = args[0] if args else None - def wrapper(wrapped, instance, args, kwargs): - calling = args[0] if args else None + if not calling: + return wrapped(*args, **kwargs) - if not calling: - return wrapped(*args, **kwargs) + tool_name = getattr(calling, "tool_name", "unknown_tool") - tool_name = getattr(calling, "tool_name", "unknown_tool") + with store_tool_execution() as tool_details: + tool_details["name"] = tool_name - with store_tool_execution() as tool_details: - tool_details["name"] = tool_name + if hasattr(calling, "arguments") and calling.arguments: + tool_details["parameters"] = str(calling.arguments) + with create_span( + tracer, + f"{tool_name}.tool_usage", + kind=SpanKind.INTERNAL, + 
attributes={ + SpanAttributes.AGENTOPS_SPAN_KIND: "tool.usage", + ToolAttributes.TOOL_NAME: tool_name, + }, + attribute_manager=attr_manager, + ) as span: if hasattr(calling, "arguments") and calling.arguments: - tool_details["parameters"] = str(calling.arguments) - - with tracer.start_as_current_span( - f"{tool_name}.tool_usage", - kind=SpanKind.INTERNAL, - attributes={ - SpanAttributes.AGENTOPS_SPAN_KIND: "tool.usage", - ToolAttributes.TOOL_NAME: tool_name, - }, - ) as span: - try: - span.set_attribute(TELEMETRY_SDK_NAME, "agentops") - span.set_attribute(SERVICE_NAME, application_name) - span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment) - - if hasattr(calling, "arguments") and calling.arguments: - span.set_attribute(ToolAttributes.TOOL_PARAMETERS, str(calling.arguments)) + span.set_attribute(ToolAttributes.TOOL_PARAMETERS, str(calling.arguments)) - result = wrapped(*args, **kwargs) - - tool_result = str(result) - span.set_attribute(ToolAttributes.TOOL_RESULT, tool_result) - tool_details["result"] = tool_result - - tool_status = "success" - span.set_attribute(ToolAttributes.TOOL_STATUS, tool_status) - tool_details["status"] = tool_status + result = wrapped(*args, **kwargs) - span.set_status(Status(StatusCode.OK)) - return result - except Exception as ex: - tool_status = "error" - span.set_attribute(ToolAttributes.TOOL_STATUS, tool_status) - tool_details["status"] = tool_status - tool_details["error"] = str(ex) + tool_result = str(result) + span.set_attribute(ToolAttributes.TOOL_RESULT, tool_result) + tool_details["result"] = tool_result - span.set_status(Status(StatusCode.ERROR, str(ex))) - logger.error(f"Error in tool usage trace: {ex}") - raise + tool_status = "success" + span.set_attribute(ToolAttributes.TOOL_STATUS, tool_status) + tool_details["status"] = tool_status - return wrapper + return result def is_metrics_enabled() -> bool: return (os.getenv("AGENTOPS_METRICS_ENABLED") or "true").lower() == "true" - - -def _create_metrics(meter: Meter): - token_histogram = meter.create_histogram( - name=Meters.LLM_TOKEN_USAGE, - unit="token", - description="Measures number of input and output tokens used", - ) - - duration_histogram = meter.create_histogram( - name=Meters.LLM_OPERATION_DURATION, - unit="s", - description="GenAI operation duration", - ) - - return token_histogram, duration_histogram diff --git a/agentops/instrumentation/google_adk/instrumentor.py b/agentops/instrumentation/google_adk/instrumentor.py index 000b58073..53f81f76b 100644 --- a/agentops/instrumentation/google_adk/instrumentor.py +++ b/agentops/instrumentation/google_adk/instrumentor.py @@ -7,18 +7,19 @@ 3. 
Extract and properly index LLM messages and tool calls """ -from typing import Collection -from opentelemetry.trace import get_tracer -from opentelemetry.instrumentation.instrumentor import BaseInstrumentor -from opentelemetry.metrics import get_meter +from typing import Collection, List from agentops.logging import logger -from agentops.instrumentation.google_adk import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics +from agentops.instrumentation.common.wrappers import WrapConfig from agentops.instrumentation.google_adk.patch import patch_adk, unpatch_adk -from agentops.semconv import Meters +# Library info for tracer/meter +LIBRARY_NAME = "agentops.instrumentation.google_adk" +LIBRARY_VERSION = "0.1.0" -class GoogleADKInstrumentor(BaseInstrumentor): + +class GoogleADKInstrumentor(BaseAgentOpsInstrumentor): """An instrumentor for Google Agent Development Kit (ADK). This instrumentor patches Google ADK to: @@ -27,10 +28,27 @@ class GoogleADKInstrumentor(BaseInstrumentor): - Properly extract and index message content and tool interactions """ + def __init__(self): + """Initialize the Google ADK instrumentor.""" + super().__init__( + name="google_adk", + version=LIBRARY_VERSION, + library_name=LIBRARY_NAME, + ) + def instrumentation_dependencies(self) -> Collection[str]: """Return packages required for instrumentation.""" return ["google-adk >= 0.1.0"] + def _get_wrapped_methods(self) -> List[WrapConfig]: + """ + Return list of methods to be wrapped. + + For Google ADK, we don't use the standard wrapping mechanism + since we're using a patching approach instead. + """ + return [] + def _instrument(self, **kwargs): """Instrument the Google ADK. @@ -39,34 +57,23 @@ def _instrument(self, **kwargs): 2. Patches key ADK methods to create AgentOps spans 3. 
Sets up metrics for tracking token usage and operation duration """ - # Set up tracer and meter - tracer_provider = kwargs.get("tracer_provider") - tracer = get_tracer(LIBRARY_NAME, LIBRARY_VERSION, tracer_provider) - - meter_provider = kwargs.get("meter_provider") - meter = get_meter(LIBRARY_NAME, LIBRARY_VERSION, meter_provider) - - # Create metrics - meter.create_histogram( - name=Meters.LLM_TOKEN_USAGE, - unit="token", - description="Measures number of input and output tokens used with Google ADK", - ) + # Note: We don't call super()._instrument() here because we're not using + # the standard wrapping mechanism for this special instrumentor - meter.create_histogram( - name=Meters.LLM_OPERATION_DURATION, - unit="s", - description="Google ADK operation duration", - ) + # Get tracer and meter from base class + self._tracer_provider = kwargs.get("tracer_provider") + self._meter_provider = kwargs.get("meter_provider") - meter.create_counter( - name=Meters.LLM_COMPLETIONS_EXCEPTIONS, - unit="time", - description="Number of exceptions occurred during Google ADK operations", - ) + # Initialize tracer and meter (these are set by base class properties) + _ = self._tracer + _ = self._meter + + # Create standard metrics for LLM operations + self._metrics = StandardMetrics(self._meter) + self._metrics.create_llm_metrics(system_name="Google ADK", operation_description="Google ADK operation") - # Apply patches - patch_adk(tracer) + # Apply patches with our tracer + patch_adk(self._tracer) logger.info("Google ADK instrumentation enabled") def _uninstrument(self, **kwargs): @@ -74,5 +81,8 @@ def _uninstrument(self, **kwargs): This method removes all patches and restores ADK's original behavior. """ + # Note: We don't call super()._uninstrument() here because we're not using + # the standard wrapping mechanism for this special instrumentor + unpatch_adk() logger.info("Google ADK instrumentation disabled") diff --git a/agentops/instrumentation/google_genai/instrumentor.py b/agentops/instrumentation/google_genai/instrumentor.py index 023cd5add..6c242c621 100644 --- a/agentops/instrumentation/google_genai/instrumentor.py +++ b/agentops/instrumentation/google_genai/instrumentor.py @@ -9,14 +9,11 @@ """ from typing import List, Collection -from opentelemetry.trace import get_tracer -from opentelemetry.instrumentation.instrumentor import BaseInstrumentor -from opentelemetry.metrics import get_meter from wrapt import wrap_function_wrapper from agentops.logging import logger -from agentops.instrumentation.common.wrappers import WrapConfig, wrap, unwrap -from agentops.instrumentation.google_genai import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics +from agentops.instrumentation.common.wrappers import WrapConfig from agentops.instrumentation.google_genai.attributes.model import ( get_generate_content_attributes, get_token_counting_attributes, @@ -25,7 +22,10 @@ generate_content_stream_wrapper, generate_content_stream_async_wrapper, ) -from agentops.semconv import Meters + +# Library info for tracer/meter +LIBRARY_NAME = "agentops.instrumentation.google_genai" +LIBRARY_VERSION = "0.1.0" # Methods to wrap for instrumentation WRAPPED_METHODS: List[WrapConfig] = [ @@ -96,7 +96,7 @@ ] -class GoogleGenAIInstrumentor(BaseInstrumentor): +class GoogleGenAIInstrumentor(BaseAgentOpsInstrumentor): """An instrumentor for Google Generative AI (Gemini) API. 
This class provides instrumentation for Google's Generative AI API by wrapping key methods @@ -106,6 +106,14 @@ class GoogleGenAIInstrumentor(BaseInstrumentor): It captures metrics including token usage, operation duration, and exceptions. """ + def __init__(self): + """Initialize the Google GenAI instrumentor.""" + super().__init__( + name="google_genai", + version=LIBRARY_VERSION, + library_name=LIBRARY_NAME, + ) + def instrumentation_dependencies(self) -> Collection[str]: """Return packages required for instrumentation. @@ -114,6 +122,10 @@ def instrumentation_dependencies(self) -> Collection[str]: """ return ["google-genai >= 0.1.0"] + def _get_wrapped_methods(self) -> List[WrapConfig]: + """Return list of methods to be wrapped.""" + return WRAPPED_METHODS + def _instrument(self, **kwargs): """Instrument the Google Generative AI API. @@ -124,50 +136,31 @@ def _instrument(self, **kwargs): Args: **kwargs: Configuration options for instrumentation. """ - tracer_provider = kwargs.get("tracer_provider") - tracer = get_tracer(LIBRARY_NAME, LIBRARY_VERSION, tracer_provider) - - meter_provider = kwargs.get("meter_provider") - meter = get_meter(LIBRARY_NAME, LIBRARY_VERSION, meter_provider) + # Call parent implementation to handle standard method wrapping + super()._instrument(**kwargs) - meter.create_histogram( - name=Meters.LLM_TOKEN_USAGE, - unit="token", - description="Measures number of input and output tokens used with Google Generative AI models", + # Create standard metrics for LLM operations + self._metrics = StandardMetrics(self._meter) + self._metrics.create_llm_metrics( + system_name="Google Generative AI", operation_description="Google Generative AI operation" ) - meter.create_histogram( - name=Meters.LLM_OPERATION_DURATION, - unit="s", - description="Google Generative AI operation duration", - ) - - meter.create_counter( - name=Meters.LLM_COMPLETIONS_EXCEPTIONS, - unit="time", - description="Number of exceptions occurred during Google Generative AI completions", - ) - - # Standard method wrapping approach for regular methods - for wrap_config in WRAPPED_METHODS: - try: - wrap(wrap_config, tracer) - except (AttributeError, ModuleNotFoundError) as e: - logger.debug( - f"Could not wrap {wrap_config.package}.{wrap_config.class_name}.{wrap_config.method_name}: {e}" - ) - # Special handling for streaming responses for stream_method in STREAMING_METHODS: try: wrap_function_wrapper( stream_method["module"], stream_method["class_method"], - stream_method["wrapper"](tracer), + stream_method["wrapper"](self._tracer), + ) + logger.debug( + f"Successfully wrapped streaming method {stream_method['module']}.{stream_method['class_method']}" ) except (AttributeError, ModuleNotFoundError) as e: logger.debug(f"Failed to wrap {stream_method['module']}.{stream_method['class_method']}: {e}") + logger.info("Google Generative AI instrumentation enabled") + def _uninstrument(self, **kwargs): """Remove instrumentation from Google Generative AI API. @@ -177,14 +170,8 @@ def _uninstrument(self, **kwargs): Args: **kwargs: Configuration options for uninstrumentation. 
""" - # Unwrap standard methods - for wrap_config in WRAPPED_METHODS: - try: - unwrap(wrap_config) - except Exception as e: - logger.debug( - f"Failed to unwrap {wrap_config.package}.{wrap_config.class_name}.{wrap_config.method_name}: {e}" - ) + # Call parent implementation to handle standard method unwrapping + super()._uninstrument(**kwargs) # Unwrap streaming methods from opentelemetry.instrumentation.utils import unwrap as otel_unwrap @@ -195,3 +182,5 @@ def _uninstrument(self, **kwargs): logger.debug(f"Unwrapped streaming method {stream_method['module']}.{stream_method['class_method']}") except (AttributeError, ModuleNotFoundError) as e: logger.debug(f"Failed to unwrap {stream_method['module']}.{stream_method['class_method']}: {e}") + + logger.info("Google Generative AI instrumentation disabled") diff --git a/agentops/instrumentation/ibm_watsonx_ai/instrumentor.py b/agentops/instrumentation/ibm_watsonx_ai/instrumentor.py index 4ced094df..814af6235 100644 --- a/agentops/instrumentation/ibm_watsonx_ai/instrumentor.py +++ b/agentops/instrumentation/ibm_watsonx_ai/instrumentor.py @@ -13,14 +13,11 @@ """ from typing import List, Collection -from opentelemetry.trace import get_tracer -from opentelemetry.instrumentation.instrumentor import BaseInstrumentor -from opentelemetry.metrics import get_meter from wrapt import wrap_function_wrapper from agentops.logging import logger -from agentops.instrumentation.common.wrappers import WrapConfig, wrap, unwrap -from agentops.instrumentation.ibm_watsonx_ai import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics +from agentops.instrumentation.common.wrappers import WrapConfig from agentops.instrumentation.ibm_watsonx_ai.attributes.attributes import ( get_generate_attributes, get_tokenize_attributes, @@ -28,7 +25,10 @@ get_chat_attributes, ) from agentops.instrumentation.ibm_watsonx_ai.stream_wrapper import generate_text_stream_wrapper, chat_stream_wrapper -from agentops.semconv import Meters + +# Library info for tracer/meter +LIBRARY_NAME = "agentops.instrumentation.ibm_watsonx_ai" +LIBRARY_VERSION = "0.1.0" # Methods to wrap for instrumentation WRAPPED_METHODS: List[WrapConfig] = [ @@ -44,7 +44,7 @@ package="ibm_watsonx_ai.foundation_models.inference", class_name="ModelInference", method_name="generate_text_stream", - handler=None, + handler=None, # Handled by dedicated wrapper ), WrapConfig( trace_name="watsonx.chat", @@ -58,7 +58,7 @@ package="ibm_watsonx_ai.foundation_models.inference", class_name="ModelInference", method_name="chat_stream", - handler=None, + handler=None, # Handled by dedicated wrapper ), WrapConfig( trace_name="watsonx.tokenize", @@ -77,51 +77,36 @@ ] -class IBMWatsonXInstrumentor(BaseInstrumentor): +class IBMWatsonXInstrumentor(BaseAgentOpsInstrumentor): """An instrumentor for IBM watsonx.ai API.""" + def __init__(self): + """Initialize the IBM watsonx.ai instrumentor.""" + super().__init__( + name="ibm_watsonx_ai", + version=LIBRARY_VERSION, + library_name=LIBRARY_NAME, + ) + def instrumentation_dependencies(self) -> Collection[str]: """Return packages required for instrumentation.""" return ["ibm-watsonx-ai >= 1.3.11"] - def _instrument(self, **kwargs): - """Instrument the IBM watsonx.ai API.""" - tracer_provider = kwargs.get("tracer_provider") - tracer = get_tracer(LIBRARY_NAME, LIBRARY_VERSION, tracer_provider) - - meter_provider = kwargs.get("meter_provider") - meter = get_meter(LIBRARY_NAME, LIBRARY_VERSION, meter_provider) - - meter.create_histogram( - 
name=Meters.LLM_TOKEN_USAGE, - unit="token", - description="Measures number of input and output tokens used with IBM watsonx.ai models", - ) + def _get_wrapped_methods(self) -> List[WrapConfig]: + """Return list of methods to be wrapped. - meter.create_histogram( - name=Meters.LLM_OPERATION_DURATION, - unit="s", - description="IBM watsonx.ai operation duration", - ) + Note: We filter out stream methods here as they need dedicated wrappers. + """ + return [wc for wc in WRAPPED_METHODS if wc.method_name not in ["generate_text_stream", "chat_stream"]] - meter.create_counter( - name=Meters.LLM_COMPLETIONS_EXCEPTIONS, - unit="time", - description="Number of exceptions occurred during IBM watsonx.ai completions", - ) + def _instrument(self, **kwargs): + """Instrument the IBM watsonx.ai API.""" + # Call parent implementation to handle standard method wrapping + super()._instrument(**kwargs) - # Standard method wrapping approach for regular methods - for wrap_config in WRAPPED_METHODS: - try: - # Skip stream methods handled by dedicated wrappers - if wrap_config.method_name in ["generate_text_stream", "chat_stream"]: - continue - wrap(wrap_config, tracer) - logger.debug(f"Wrapped {wrap_config.package}.{wrap_config.class_name}.{wrap_config.method_name}") - except (AttributeError, ModuleNotFoundError) as e: - logger.debug( - f"Could not wrap {wrap_config.package}.{wrap_config.class_name}.{wrap_config.method_name}: {e}" - ) + # Create standard metrics for LLM operations + self._metrics = StandardMetrics(self._meter) + self._metrics.create_llm_metrics(system_name="IBM watsonx.ai", operation_description="IBM watsonx.ai operation") # Dedicated wrappers for stream methods try: @@ -150,14 +135,26 @@ def _instrument(self, **kwargs): except (StopIteration, AttributeError, ModuleNotFoundError) as e: logger.debug(f"Could not wrap chat_stream with dedicated wrapper: {e}") + logger.info("IBM watsonx.ai instrumentation enabled") + def _uninstrument(self, **kwargs): """Remove instrumentation from IBM watsonx.ai API.""" - # Unwrap standard methods + # Call parent implementation to handle standard method unwrapping + super()._uninstrument(**kwargs) + + # Unwrap streaming methods manually + from opentelemetry.instrumentation.utils import unwrap as otel_unwrap + for wrap_config in WRAPPED_METHODS: - try: - unwrap(wrap_config) - logger.debug(f"Unwrapped {wrap_config.package}.{wrap_config.class_name}.{wrap_config.method_name}") - except Exception as e: - logger.debug( - f"Failed to unwrap {wrap_config.package}.{wrap_config.class_name}.{wrap_config.method_name}: {e}" - ) + if wrap_config.method_name in ["generate_text_stream", "chat_stream"]: + try: + otel_unwrap(wrap_config.package, f"{wrap_config.class_name}.{wrap_config.method_name}") + logger.debug( + f"Unwrapped streaming method {wrap_config.package}.{wrap_config.class_name}.{wrap_config.method_name}" + ) + except Exception as e: + logger.debug( + f"Failed to unwrap streaming method {wrap_config.package}.{wrap_config.class_name}.{wrap_config.method_name}: {e}" + ) + + logger.info("IBM watsonx.ai instrumentation disabled") diff --git a/agentops/instrumentation/mem0/instrumentor.py b/agentops/instrumentation/mem0/instrumentor.py index 51a0dac60..3933771c3 100644 --- a/agentops/instrumentation/mem0/instrumentor.py +++ b/agentops/instrumentation/mem0/instrumentor.py @@ -1,10 +1,8 @@ -from typing import Collection -from opentelemetry.instrumentation.instrumentor import BaseInstrumentor -from opentelemetry.trace import get_tracer -from opentelemetry.metrics import 
get_meter +from typing import Collection, List from wrapt import wrap_function_wrapper -from agentops.instrumentation.mem0 import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics +from agentops.instrumentation.common.wrappers import WrapConfig from agentops.logging import logger # Import from refactored structure @@ -19,7 +17,9 @@ mem0_history_wrapper, ) -from agentops.semconv import Meters +# Library info for tracer/meter +LIBRARY_NAME = "agentops.instrumentation.mem0" +LIBRARY_VERSION = "0.1.0" # Methods to wrap for instrumentation using specialized wrappers WRAPPER_METHODS = [ @@ -180,7 +180,7 @@ ] -class Mem0Instrumentor(BaseInstrumentor): +class Mem0Instrumentor(BaseAgentOpsInstrumentor): """An instrumentor for Mem0's client library. This class provides instrumentation for Mem0's memory operations by wrapping key methods @@ -194,6 +194,14 @@ class Mem0Instrumentor(BaseInstrumentor): It captures metrics including operation duration, memory counts, and exceptions. """ + def __init__(self): + """Initialize the Mem0 instrumentor.""" + super().__init__( + name="mem0", + version=LIBRARY_VERSION, + library_name=LIBRARY_NAME, + ) + def instrumentation_dependencies(self) -> Collection[str]: """Return packages required for instrumentation. @@ -202,6 +210,14 @@ def instrumentation_dependencies(self) -> Collection[str]: """ return ["mem0ai >= 0.1.10"] + def _get_wrapped_methods(self) -> List[WrapConfig]: + """Return list of methods to be wrapped. + + For Mem0, we don't use the standard wrapping mechanism + since we're using specialized wrappers instead. + """ + return [] + def _instrument(self, **kwargs): """Instrument the Mem0 Memory API. @@ -212,28 +228,24 @@ def _instrument(self, **kwargs): Args: **kwargs: Configuration options for instrumentation. 
""" - super()._instrument(**kwargs) + # Note: We don't call super()._instrument() here because we're not using + # the standard wrapping mechanism for this special instrumentor + logger.debug("Starting Mem0 instrumentation...") - tracer_provider = kwargs.get("tracer_provider") - tracer = get_tracer(LIBRARY_NAME, LIBRARY_VERSION, tracer_provider) + # Get tracer and meter from base class properties + self._tracer_provider = kwargs.get("tracer_provider") + self._meter_provider = kwargs.get("meter_provider") - meter_provider = kwargs.get("meter_provider") - meter = get_meter(LIBRARY_NAME, LIBRARY_VERSION, meter_provider) + # Initialize tracer and meter (these are set by base class properties) + tracer = self._tracer + meter = self._meter - # Create metrics for memory operations - meter.create_histogram( - name=Meters.LLM_OPERATION_DURATION, - unit="s", - description="Mem0 memory operation duration", - ) - - meter.create_counter( - name=Meters.LLM_COMPLETIONS_EXCEPTIONS, - unit="time", - description="Number of exceptions occurred during Mem0 operations", - ) + # Create standard metrics for memory operations + self._metrics = StandardMetrics(meter) + self._metrics.create_llm_metrics(system_name="Mem0", operation_description="Mem0 memory operation") + # Create additional metrics specific to memory operations meter.create_histogram( name="mem0.memory.count", unit="memory", @@ -254,7 +266,8 @@ def _instrument(self, **kwargs): except Exception as e: # Log unexpected errors as warnings logger.warning(f"Unexpected error wrapping {package}.{class_method}: {e}") - logger.debug("Mem0 instrumentation completed") + + logger.info("Mem0 instrumentation enabled") def _uninstrument(self, **kwargs): """Remove instrumentation from Mem0 Memory API. @@ -265,6 +278,9 @@ def _uninstrument(self, **kwargs): Args: **kwargs: Configuration options for uninstrumentation. """ + # Note: We don't call super()._uninstrument() here because we're not using + # the standard wrapping mechanism for this special instrumentor + # Unwrap specialized methods from opentelemetry.instrumentation.utils import unwrap @@ -275,3 +291,5 @@ def _uninstrument(self, **kwargs): unwrap(package, class_method) except Exception as e: logger.debug(f"Failed to unwrap {package}.{class_method}: {e}") + + logger.info("Mem0 instrumentation disabled") diff --git a/agentops/instrumentation/openai/instrumentor.py b/agentops/instrumentation/openai/instrumentor.py index 63c560d0c..3371e4b12 100644 --- a/agentops/instrumentation/openai/instrumentor.py +++ b/agentops/instrumentation/openai/instrumentor.py @@ -12,11 +12,15 @@ and distributed tracing. 
""" -from typing import List, Collection -from opentelemetry.trace import get_tracer -from opentelemetry.instrumentation.instrumentor import BaseInstrumentor - -from agentops.instrumentation.common.wrappers import WrapConfig +from typing import Dict, Any + +from agentops.instrumentation.common import ( + BaseAgentOpsInstrumentor, + InstrumentorConfig, + WrapConfig, + StandardMetrics, + MetricsRecorder, +) from agentops.instrumentation.openai import LIBRARY_NAME, LIBRARY_VERSION from agentops.instrumentation.openai.attributes.common import get_response_attributes from agentops.instrumentation.openai.config import Config @@ -34,11 +38,12 @@ ) from agentops.instrumentation.openai.v0 import OpenAIV0Instrumentor from agentops.semconv import Meters +from opentelemetry.metrics import Meter _instruments = ("openai >= 0.27.0",) -class OpenAIInstrumentor(BaseInstrumentor): +class OpenAIInstrumentor(BaseAgentOpsInstrumentor): """An instrumentor for OpenAI's client library with comprehensive coverage.""" def __init__( @@ -50,7 +55,6 @@ def __init__( upload_base64_image=None, enable_trace_context_propagation: bool = True, ): - super().__init__() # Configure the global config with provided options Config.enrich_assistant = enrich_assistant Config.enrich_token_usage = enrich_token_usage @@ -59,103 +63,73 @@ def __init__( Config.upload_base64_image = upload_base64_image Config.enable_trace_context_propagation = enable_trace_context_propagation - def instrumentation_dependencies(self) -> Collection[str]: - return _instruments + # Create instrumentor config + config = InstrumentorConfig( + library_name=LIBRARY_NAME, + library_version=LIBRARY_VERSION, + wrapped_methods=self._get_wrapped_methods(), + metrics_enabled=True, + dependencies=_instruments, + ) - def _instrument(self, **kwargs): - """Instrument the OpenAI API.""" + super().__init__(config) + + def _initialize(self, **kwargs): + """Handle version-specific initialization.""" if not is_openai_v1(): # For v0, use the legacy instrumentor OpenAIV0Instrumentor().instrument(**kwargs) - return - - # Get tracer and meter - tracer_provider = kwargs.get("tracer_provider") - tracer = get_tracer(LIBRARY_NAME, LIBRARY_VERSION, tracer_provider) - - # Define all wrapped methods - wrapped_methods = self._get_wrapped_methods() - - # Apply all wrappers using the common wrapper infrastructure - from agentops.instrumentation.common.wrappers import wrap + # Skip normal instrumentation + self.config.wrapped_methods = [] + + def _create_metrics(self, meter: Meter) -> Dict[str, Any]: + """Create metrics for OpenAI instrumentation.""" + metrics = StandardMetrics.create_standard_metrics(meter) + + # Add OpenAI-specific metrics + metrics.update( + { + "chat_choice_counter": meter.create_counter( + name=Meters.LLM_GENERATION_CHOICES, + unit="choice", + description="Number of choices returned by chat completions call", + ), + "streaming_time_to_first_token": meter.create_histogram( + name=Meters.LLM_STREAMING_TIME_TO_FIRST_TOKEN, + unit="s", + description="Time to first token in streaming chat completions", + ), + "streaming_time_to_generate": meter.create_histogram( + name=Meters.LLM_STREAMING_TIME_TO_GENERATE, + unit="s", + description="Time between first token and completion in streaming chat completions", + ), + "embeddings_vector_size_counter": meter.create_counter( + name=Meters.LLM_EMBEDDINGS_VECTOR_SIZE, + unit="element", + description="The size of returned vector", + ), + "embeddings_exception_counter": meter.create_counter( + name=Meters.LLM_EMBEDDINGS_EXCEPTIONS, + 
unit="time", + description="Number of exceptions occurred during embeddings operation", + ), + "image_gen_exception_counter": meter.create_counter( + name=Meters.LLM_IMAGE_GENERATIONS_EXCEPTIONS, + unit="time", + description="Number of exceptions occurred during image generations operation", + ), + } + ) - for wrap_config in wrapped_methods: - try: - wrap(wrap_config, tracer) - except (AttributeError, ModuleNotFoundError): - # Some methods may not be available in all versions - pass + return metrics - def _uninstrument(self, **kwargs): - """Remove instrumentation from OpenAI API.""" + def _custom_unwrap(self, **kwargs): + """Handle version-specific uninstrumentation.""" if not is_openai_v1(): OpenAIV0Instrumentor().uninstrument(**kwargs) - return - - # Get all wrapped methods - wrapped_methods = self._get_wrapped_methods() - # Remove all wrappers using the common wrapper infrastructure - from agentops.instrumentation.common.wrappers import unwrap - - for wrap_config in wrapped_methods: - try: - unwrap(wrap_config) - except Exception: - # Some methods may not be wrapped - pass - - def _init_metrics(self, meter): - """Initialize metrics for instrumentation.""" - return { - "tokens_histogram": meter.create_histogram( - name=Meters.LLM_TOKEN_USAGE, - unit="token", - description="Measures number of input and output tokens used", - ), - "chat_choice_counter": meter.create_counter( - name=Meters.LLM_GENERATION_CHOICES, - unit="choice", - description="Number of choices returned by chat completions call", - ), - "duration_histogram": meter.create_histogram( - name=Meters.LLM_OPERATION_DURATION, - unit="s", - description="GenAI operation duration", - ), - "chat_exception_counter": meter.create_counter( - name=Meters.LLM_COMPLETIONS_EXCEPTIONS, - unit="time", - description="Number of exceptions occurred during chat completions", - ), - "streaming_time_to_first_token": meter.create_histogram( - name=Meters.LLM_STREAMING_TIME_TO_FIRST_TOKEN, - unit="s", - description="Time to first token in streaming chat completions", - ), - "streaming_time_to_generate": meter.create_histogram( - name=Meters.LLM_STREAMING_TIME_TO_GENERATE, - unit="s", - description="Time between first token and completion in streaming chat completions", - ), - "embeddings_vector_size_counter": meter.create_counter( - name=Meters.LLM_EMBEDDINGS_VECTOR_SIZE, - unit="element", - description="The size of returned vector", - ), - "embeddings_exception_counter": meter.create_counter( - name=Meters.LLM_EMBEDDINGS_EXCEPTIONS, - unit="time", - description="Number of exceptions occurred during embeddings operation", - ), - "image_gen_exception_counter": meter.create_counter( - name=Meters.LLM_IMAGE_GENERATIONS_EXCEPTIONS, - unit="time", - description="Number of exceptions occurred during image generations operation", - ), - } - - def _get_wrapped_methods(self) -> List[WrapConfig]: + def _get_wrapped_methods(self) -> list[WrapConfig]: """Get all methods that should be wrapped.""" wrapped_methods = [] @@ -331,3 +305,7 @@ def _get_wrapped_methods(self) -> List[WrapConfig]: ) return wrapped_methods + + def get_metrics_recorder(self) -> MetricsRecorder: + """Get a metrics recorder for use in wrappers.""" + return MetricsRecorder(self._metrics) diff --git a/agentops/instrumentation/openai_agents/attributes/common.py b/agentops/instrumentation/openai_agents/attributes/common.py index 93e880cf3..0614ef5a5 100644 --- a/agentops/instrumentation/openai_agents/attributes/common.py +++ b/agentops/instrumentation/openai_agents/attributes/common.py @@ -37,7 
+37,7 @@ AgentAttributes.AGENT_TOOLS: "tools", AgentAttributes.HANDOFFS: "handoffs", WorkflowAttributes.WORKFLOW_INPUT: "input", - WorkflowAttributes.FINAL_OUTPUT: "output", + WorkflowAttributes.WORKFLOW_FINAL_OUTPUT: "output", } diff --git a/agentops/instrumentation/smolagents/instrumentor.py b/agentops/instrumentation/smolagents/instrumentor.py index 37b45b750..37b6b6877 100644 --- a/agentops/instrumentation/smolagents/instrumentor.py +++ b/agentops/instrumentation/smolagents/instrumentor.py @@ -1,14 +1,16 @@ """SmoLAgents instrumentation for AgentOps.""" -from typing import Collection -from opentelemetry.instrumentation.instrumentor import BaseInstrumentor -from opentelemetry.trace import get_tracer, SpanKind +from typing import Collection, List +from opentelemetry.trace import SpanKind from wrapt import wrap_function_wrapper -from agentops.instrumentation.common.wrappers import unwrap +from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics +from agentops.instrumentation.common.wrappers import WrapConfig +from agentops.logging import logger -# Define LIBRARY_VERSION directly to avoid circular import -LIBRARY_VERSION = "1.16.0" +# Library info for tracer/meter +LIBRARY_NAME = "agentops.instrumentation.smolagents" +LIBRARY_VERSION = "0.1.0" # Import attribute handlers try: @@ -51,23 +53,49 @@ def get_stream_attributes(*args, **kwargs): return {} -class SmolAgentsInstrumentor(BaseInstrumentor): +class SmolAgentsInstrumentor(BaseAgentOpsInstrumentor): """Instrumentor for SmoLAgents library.""" + def __init__(self): + """Initialize the SmoLAgents instrumentor.""" + super().__init__( + name="smolagents", + version=LIBRARY_VERSION, + library_name=LIBRARY_NAME, + ) + def instrumentation_dependencies(self) -> Collection[str]: return ( "smolagents >= 1.0.0", "litellm", ) + def _get_wrapped_methods(self) -> List[WrapConfig]: + """Return list of methods to be wrapped. + + For SmoLAgents, we don't use the standard wrapping mechanism + since we need custom wrappers with special logic. 
+ """ + return [] + def _instrument(self, **kwargs): """Instrument SmoLAgents with AgentOps telemetry.""" - tracer_provider = kwargs.get("tracer_provider") - tracer = get_tracer(__name__, LIBRARY_VERSION, tracer_provider) + # Note: We don't call super()._instrument() here because we're not using + # the standard wrapping mechanism for this special instrumentor + + # Get tracer from base class + self._tracer_provider = kwargs.get("tracer_provider") + self._meter_provider = kwargs.get("meter_provider") + + # Initialize tracer and meter (these are set by base class properties) + tracer = self._tracer + + # Create standard metrics + self._metrics = StandardMetrics(self._meter) + self._metrics.create_llm_metrics(system_name="SmoLAgents", operation_description="SmoLAgents operation") # Core agent operations wrap_function_wrapper("smolagents.agents", "CodeAgent.run", self._agent_run_wrapper(tracer)) - wrap_function_wrapper("smolagents.agents", "ToolCallingAgent.run", self._agent_run_wrapper(tracer)) # Tool calling operations @@ -77,9 +105,10 @@ def _instrument(self, **kwargs): # Model operations with proper model name extraction wrap_function_wrapper("smolagents.models", "LiteLLMModel.generate", self._llm_wrapper(tracer)) - wrap_function_wrapper("smolagents.models", "LiteLLMModel.generate_stream", self._llm_wrapper(tracer)) + logger.info("SmoLAgents instrumentation enabled") + def _agent_run_wrapper(self, tracer): """Wrapper for agent run methods.""" @@ -234,9 +263,35 @@ def wrapper(wrapped, instance, args, kwargs): def _uninstrument(self, **kwargs): """Remove instrumentation.""" + # Note: We don't call super()._uninstrument() here because we're not using + # the standard wrapping mechanism for this special instrumentor + # Unwrap all instrumented methods - unwrap("smolagents.agents", "CodeAgent.run") - unwrap("smolagents.agents", "ToolCallingAgent.run") - unwrap("smolagents.agents", "ToolCallingAgent.execute_tool_call") - unwrap("smolagents.models", "LiteLLMModel.generate") - unwrap("smolagents.models", "LiteLLMModel.generate_stream") + from opentelemetry.instrumentation.utils import unwrap + + try: + unwrap("smolagents.agents", "CodeAgent.run") + except Exception as e: + logger.debug(f"Failed to unwrap CodeAgent.run: {e}") + + try: + unwrap("smolagents.agents", "ToolCallingAgent.run") + except Exception as e: + logger.debug(f"Failed to unwrap ToolCallingAgent.run: {e}") + + try: + unwrap("smolagents.agents", "ToolCallingAgent.execute_tool_call") + except Exception as e: + logger.debug(f"Failed to unwrap ToolCallingAgent.execute_tool_call: {e}") + + try: + unwrap("smolagents.models", "LiteLLMModel.generate") + except Exception as e: + logger.debug(f"Failed to unwrap LiteLLMModel.generate: {e}") + + try: + unwrap("smolagents.models", "LiteLLMModel.generate_stream") + except Exception as e: + logger.debug(f"Failed to unwrap LiteLLMModel.generate_stream: {e}") + + logger.info("SmoLAgents instrumentation disabled") diff --git a/agentops/semconv/README.md b/agentops/semconv/README.md index 5c924179b..26a9f86d4 100644 --- a/agentops/semconv/README.md +++ b/agentops/semconv/README.md @@ -1,56 +1,125 @@ # OpenTelemetry Semantic Conventions for Generative AI Systems -## General GenAI Attributes -| Attribute | Type | -|--------------------------------------------|---------| -| `gen_ai.agent.description` | string | -| `gen_ai.agent.id` | string | -| `gen_ai.agent.name` | string | -| `gen_ai.operation.name` | string | -| `gen_ai.output.type` | string | -| `gen_ai.request.choice.count` | int | -| 
`gen_ai.request.encoding_formats` | string[]| -| `gen_ai.request.frequency_penalty` | double | -| `gen_ai.request.max_tokens` | int | -| `gen_ai.request.model` | string | -| `gen_ai.request.presence_penalty` | double | -| `gen_ai.request.seed` | int | -| `gen_ai.request.stop_sequences` | string[]| -| `gen_ai.request.temperature` | double | -| `gen_ai.request.top_k` | double | -| `gen_ai.request.top_p` | double | -| `gen_ai.response.finish_reasons` | string[]| -| `gen_ai.response.id` | string | -| `gen_ai.response.model` | string | -| `gen_ai.system` | string | -| `gen_ai.token.type` | string | -| `gen_ai.tool.call.id` | string | -| `gen_ai.tool.name` | string | -| `gen_ai.tool.type` | string | -| `gen_ai.usage.input_tokens` | int | -| `gen_ai.usage.output_tokens` | int | - -## OpenAI-Specific Attributes -| Attribute | Type | -|--------------------------------------------|---------| -| `gen_ai.openai.request.service_tier` | string | -| `gen_ai.openai.response.service_tier` | string | -| `gen_ai.openai.response.system_fingerprint`| string | - -## GenAI Event Attributes - -### Event: `gen_ai.system.message` -| Attribute | Type | -|--------------------------------------------|---------| -| `gen_ai.system` | string | - -#### Body Fields -| Attribute | Type | -|--------------------------------------------|---------| -| `content` | string | -| `role` | string | - -### Event: `gen_ai.user.message` -| Attribute | Type | -|--------------------------------------------|---------| -| `gen_ai.system` | string | \ No newline at end of file +This module provides semantic conventions for telemetry data in AI and LLM systems, following OpenTelemetry GenAI conventions where applicable. + +## Core Conventions + +### Agent Attributes (`agent.py`) +```python +from agentops.semconv import AgentAttributes + +AgentAttributes.AGENT_NAME # Agent name +AgentAttributes.AGENT_ROLE # Agent role/type +AgentAttributes.AGENT_ID # Unique agent identifier +``` + +### Tool Attributes (`tool.py`) +```python +from agentops.semconv import ToolAttributes, ToolStatus + +ToolAttributes.TOOL_NAME # Tool name +ToolAttributes.TOOL_PARAMETERS # Tool input parameters +ToolAttributes.TOOL_RESULT # Tool execution result +ToolAttributes.TOOL_STATUS # Tool execution status + +# Tool status values +ToolStatus.EXECUTING # Tool is executing +ToolStatus.SUCCEEDED # Tool completed successfully +ToolStatus.FAILED # Tool execution failed +``` + +### Workflow Attributes (`workflow.py`) +```python +from agentops.semconv import WorkflowAttributes + +WorkflowAttributes.WORKFLOW_NAME # Workflow name +WorkflowAttributes.WORKFLOW_TYPE # Workflow type +WorkflowAttributes.WORKFLOW_STEP_NAME # Step name +WorkflowAttributes.WORKFLOW_STEP_STATUS # Step status +``` + +### LLM/GenAI Attributes (`span_attributes.py`) +Following OpenTelemetry GenAI conventions: + +```python +from agentops.semconv import SpanAttributes + +# Request attributes +SpanAttributes.LLM_REQUEST_MODEL # Model name (e.g., "gpt-4") +SpanAttributes.LLM_REQUEST_TEMPERATURE # Temperature setting +SpanAttributes.LLM_REQUEST_MAX_TOKENS # Max tokens to generate + +# Response attributes +SpanAttributes.LLM_RESPONSE_MODEL # Model that generated response +SpanAttributes.LLM_RESPONSE_FINISH_REASON # Why generation stopped + +# Token usage +SpanAttributes.LLM_USAGE_PROMPT_TOKENS # Input tokens +SpanAttributes.LLM_USAGE_COMPLETION_TOKENS # Output tokens +SpanAttributes.LLM_USAGE_TOTAL_TOKENS # Total tokens +``` + +### Message Attributes (`message.py`) +For chat-based interactions: + +```python +from 
agentops.semconv import MessageAttributes

# Prompt messages (indexed)
MessageAttributes.PROMPT_ROLE.format(i=0)     # Role at index 0
MessageAttributes.PROMPT_CONTENT.format(i=0)  # Content at index 0

# Completion messages (indexed)
MessageAttributes.COMPLETION_ROLE.format(i=0)     # Role at index 0
MessageAttributes.COMPLETION_CONTENT.format(i=0)  # Content at index 0

# Tool calls (indexed)
MessageAttributes.TOOL_CALL_NAME.format(i=0)       # Tool name
MessageAttributes.TOOL_CALL_ARGUMENTS.format(i=0)  # Tool arguments
```

### Core Attributes (`core.py`)
```python
from agentops.semconv import CoreAttributes

CoreAttributes.TRACE_ID   # Trace identifier
CoreAttributes.SPAN_ID    # Span identifier
CoreAttributes.PARENT_ID  # Parent span identifier
CoreAttributes.TAGS       # User-defined tags
```

## Usage Guidelines

1. **Follow OpenTelemetry conventions** - Use `gen_ai.*` prefixed attributes for LLM operations
2. **Use indexed attributes for collections** - Messages, tool calls, etc. should use `.format(i=index)`
3. **Prefer specific over generic** - Use `SpanAttributes.LLM_REQUEST_MODEL` over custom attributes
4. **Document custom attributes** - If you need provider-specific attributes, document them clearly

## Provider-Specific Conventions

### OpenAI
- `SpanAttributes.LLM_OPENAI_RESPONSE_SYSTEM_FINGERPRINT`
- `SpanAttributes.LLM_OPENAI_API_VERSION`

### LangChain
- `LangChainAttributes.CHAIN_TYPE`
- `LangChainAttributes.TOOL_NAME`

## Metrics (`meters.py`)

Standard metrics for instrumentation:

```python
from agentops.semconv import Meters

Meters.LLM_TOKEN_USAGE             # Token usage histogram
Meters.LLM_OPERATION_DURATION      # Operation duration histogram
Meters.LLM_COMPLETIONS_EXCEPTIONS  # Exception counter
```

## Best Practices

1. **Consistency** - Use the same attributes across instrumentations
2. **Completeness** - Capture essential attributes for debugging
3. **Performance** - Avoid capturing large payloads as attributes
4. **Privacy** - Be mindful of sensitive data in attributes
\ No newline at end of file
diff --git a/agentops/semconv/workflow.py b/agentops/semconv/workflow.py
index e2bdfbaf5..2bc46e0a7 100644
--- a/agentops/semconv/workflow.py
+++ b/agentops/semconv/workflow.py
@@ -4,78 +4,55 @@ class WorkflowAttributes:
     """Workflow specific attributes."""

-    # Workflow attributes
+    # Core workflow attributes
     WORKFLOW_NAME = "workflow.name"  # Name of the workflow
     WORKFLOW_TYPE = "workflow.type"  # Type of workflow
+    WORKFLOW_ID = "workflow.workflow_id"  # Unique identifier for the workflow instance
+    WORKFLOW_RUN_ID = "workflow.run_id"  # Unique identifier for this workflow run
+    WORKFLOW_DESCRIPTION = "workflow.description"  # Description of the workflow

+    # Input/Output
     WORKFLOW_INPUT = "workflow.input"  # Input to the workflow
     WORKFLOW_INPUT_TYPE = "workflow.input.type"  # Type of input to the workflow
-    WORKFLOW_OUTPUT = "workflow.output"  # Output from the workflow
     WORKFLOW_OUTPUT_TYPE = "workflow.output.type"  # Type of output from the workflow
-
-    MAX_TURNS = "workflow.max_turns"  # Maximum number of turns in a workflow
-    FINAL_OUTPUT = "workflow.final_output"  # Final output of the workflow
+    WORKFLOW_FINAL_OUTPUT = "workflow.final_output"  # Final output of the workflow

     # Workflow step attributes
+    WORKFLOW_STEP = "workflow.step"
     WORKFLOW_STEP_TYPE = "workflow.step.type"  # Type of workflow step
     WORKFLOW_STEP_NAME = "workflow.step.name"  # Name of the workflow step
     WORKFLOW_STEP_INPUT = "workflow.step.input"  # Input to the workflow step
     WORKFLOW_STEP_OUTPUT = "workflow.step.output"  # Output from the workflow step
     WORKFLOW_STEP_STATUS = "workflow.step.status"  # Status of the workflow step
     WORKFLOW_STEP_ERROR = "workflow.step.error"  # Error from the workflow step
-    WORKFLOW_STEP = "workflow.step"

-    # Core workflow identification
-    WORKFLOW_ID = "workflow.workflow_id"  # Unique identifier for the workflow instance
-    WORKFLOW_DESCRIPTION = "workflow.description"  # Description of the workflow
-    WORKFLOW_APP_ID = "workflow.app_id"  # Application ID associated with the workflow
+    # Configuration
+    WORKFLOW_MAX_TURNS = "workflow.max_turns"  # Maximum number of turns in a workflow
+    WORKFLOW_DEBUG_MODE = "workflow.debug_mode"  # Whether debug mode is enabled

-    # Session and user context
+    # Session context (simplified)
     WORKFLOW_SESSION_ID = "workflow.session_id"  # Session ID for the workflow execution
-    WORKFLOW_SESSION_NAME = "workflow.session_name"  # Name of the workflow session
     WORKFLOW_USER_ID = "workflow.user_id"  # User ID associated with the workflow
+    WORKFLOW_APP_ID = "workflow.app_id"  # Application ID associated with the workflow

-    # Run-specific attributes
-    WORKFLOW_RUN_ID = "workflow.run_id"  # Unique identifier for this workflow run
-
-    # Configuration flags
-    WORKFLOW_DEBUG_MODE = "workflow.debug_mode"  # Whether debug mode is enabled
-    WORKFLOW_MONITORING = "workflow.monitoring"  # Whether monitoring is enabled
-    WORKFLOW_TELEMETRY = "workflow.telemetry"  # Whether telemetry is enabled
-
-    # Memory and storage
-    WORKFLOW_MEMORY_TYPE = "workflow.memory.type"  # Type of memory used by workflow
-    WORKFLOW_STORAGE_TYPE = "workflow.storage.type"  # Type of storage used by workflow
-
-    # Input parameters metadata
+    # Input metadata
     WORKFLOW_INPUT_PARAMETER_COUNT = "workflow.input.parameter_count"  # Number of input parameters
-    WORKFLOW_INPUT_PARAMETER_KEYS = "workflow.input.parameter_keys"  # Keys of input parameters
-
-    # Method metadata
     WORKFLOW_METHOD_PARAMETER_COUNT = "workflow.method.parameter_count"  # Number of
method parameters WORKFLOW_METHOD_RETURN_TYPE = "workflow.method.return_type" # Return type of the workflow method - # Output metadata + # Output metadata (commonly used) WORKFLOW_OUTPUT_CONTENT_TYPE = "workflow.output.content_type" # Content type of the output - WORKFLOW_OUTPUT_EVENT = "workflow.output.event" # Event type in the output WORKFLOW_OUTPUT_MODEL = "workflow.output.model" # Model used for the output - WORKFLOW_OUTPUT_MODEL_PROVIDER = "workflow.output.model_provider" # Provider of the model WORKFLOW_OUTPUT_MESSAGE_COUNT = "workflow.output.message_count" # Number of messages in output WORKFLOW_OUTPUT_TOOL_COUNT = "workflow.output.tool_count" # Number of tools in output + WORKFLOW_OUTPUT_IS_STREAMING = "workflow.output.is_streaming" # Whether output is streaming + + # Media counts (used by agno) WORKFLOW_OUTPUT_IMAGE_COUNT = "workflow.output.image_count" # Number of images in output WORKFLOW_OUTPUT_VIDEO_COUNT = "workflow.output.video_count" # Number of videos in output WORKFLOW_OUTPUT_AUDIO_COUNT = "workflow.output.audio_count" # Number of audio items in output - WORKFLOW_OUTPUT_IS_STREAMING = "workflow.output.is_streaming" # Whether output is streaming - # Session-specific attributes - WORKFLOW_SESSION_SESSION_ID = "workflow.session.session_id" # Session ID in session context - WORKFLOW_SESSION_SESSION_NAME = "workflow.session.session_name" # Session name in session context + # Session-specific attributes (used by agno) WORKFLOW_SESSION_WORKFLOW_ID = "workflow.session.workflow_id" # Workflow ID in session context WORKFLOW_SESSION_USER_ID = "workflow.session.user_id" # User ID in session context - WORKFLOW_SESSION_STATE_KEYS = "workflow.session.state_keys" # Keys in session state - WORKFLOW_SESSION_STATE_SIZE = "workflow.session.state_size" # Size of session state - WORKFLOW_SESSION_STORAGE_TYPE = "workflow.session.storage_type" # Storage type for session - WORKFLOW_SESSION_RETURNED_SESSION_ID = "workflow.session.returned_session_id" # Session ID returned - WORKFLOW_SESSION_CREATED_AT = "workflow.session.created_at" # Session creation timestamp - WORKFLOW_SESSION_UPDATED_AT = "workflow.session.updated_at" # Session update timestamp diff --git a/tests/unit/instrumentation/anthropic/test_instrumentor.py b/tests/unit/instrumentation/anthropic/test_instrumentor.py index a00b9ba28..3123fb80e 100644 --- a/tests/unit/instrumentation/anthropic/test_instrumentor.py +++ b/tests/unit/instrumentation/anthropic/test_instrumentor.py @@ -17,35 +17,61 @@ def test_instrumentor_setup(mock_tracer, mock_meter): instrumentor = AnthropicInstrumentor() with ( - patch( - "agentops.instrumentation.anthropic.instrumentor.get_tracer", return_value=mock_tracer - ) as mock_get_tracer, - patch("agentops.instrumentation.anthropic.instrumentor.get_meter", return_value=mock_meter) as mock_get_meter, + patch("agentops.instrumentation.common.instrumentor.get_tracer", return_value=mock_tracer) as mock_get_tracer, + patch("agentops.instrumentation.common.instrumentor.get_meter", return_value=mock_meter) as mock_get_meter, ): + # Call _instrument - this is when get_tracer and get_meter are called instrumentor._instrument() + # Verify tracer and meter were requested with correct params mock_get_tracer.assert_called_with(LIBRARY_NAME, LIBRARY_VERSION, None) mock_get_meter.assert_called_with(LIBRARY_NAME, LIBRARY_VERSION, None) + # Verify they were stored correctly + assert instrumentor._tracer == mock_tracer + assert instrumentor._meter == mock_meter + def test_instrumentor_wraps_methods(mock_tracer, mock_meter): 
"""Test that the instrumentor correctly wraps both standard and streaming methods with proper instrumentation.""" instrumentor = AnthropicInstrumentor() - mock_wrap = MagicMock() + + # Mock the anthropic module structure to prevent import errors + mock_anthropic = MagicMock() + mock_messages_module = MagicMock() + mock_completions_module = MagicMock() + + # Set up the class structure + mock_messages_module.Messages = MagicMock() + mock_messages_module.AsyncMessages = MagicMock() + mock_completions_module.Completions = MagicMock() + mock_completions_module.AsyncCompletions = MagicMock() with ( - patch("agentops.instrumentation.anthropic.instrumentor.get_tracer", return_value=mock_tracer), - patch("agentops.instrumentation.anthropic.instrumentor.get_meter", return_value=mock_meter), - patch("agentops.instrumentation.anthropic.instrumentor.wrap", mock_wrap), - patch("agentops.instrumentation.anthropic.instrumentor.wrap_function_wrapper") as mock_wrap_function, + patch.dict( + "sys.modules", + { + "anthropic": mock_anthropic, + "anthropic.resources": mock_anthropic.resources, + "anthropic.resources.messages": mock_messages_module, + "anthropic.resources.completions": mock_completions_module, + "anthropic.resources.messages.messages": mock_messages_module, + }, + ), + patch("agentops.instrumentation.common.instrumentor.get_tracer", return_value=mock_tracer), + patch("agentops.instrumentation.common.instrumentor.get_meter", return_value=mock_meter), + patch("agentops.instrumentation.common.wrappers.wrap_function_wrapper") as mock_wrap_function, + patch("agentops.instrumentation.anthropic.instrumentor.wrap_function_wrapper") as mock_stream_wrap, ): instrumentor._instrument() - assert mock_wrap.call_count == 4 + # The base instrumentor will call wrap_function_wrapper for each wrapped method + assert mock_wrap_function.call_count == 4 - mock_wrap_function.assert_any_call("anthropic.resources.messages.messages", "Messages.stream", ANY) - mock_wrap_function.assert_any_call("anthropic.resources.messages.messages", "AsyncMessages.stream", ANY) + # Check that streaming methods were wrapped with custom wrappers + mock_stream_wrap.assert_any_call("anthropic.resources.messages.messages", "Messages.stream", ANY) + mock_stream_wrap.assert_any_call("anthropic.resources.messages.messages", "AsyncMessages.stream", ANY) def test_instrumentor_uninstrument(mock_tracer, mock_meter): @@ -54,16 +80,47 @@ def test_instrumentor_uninstrument(mock_tracer, mock_meter): instrumentor = AnthropicInstrumentor() mock_unwrap = MagicMock() + # Mock the anthropic module structure + mock_anthropic = MagicMock() + mock_messages_module = MagicMock() + mock_completions_module = MagicMock() + + # Set up the class structure + mock_messages_module.Messages = MagicMock() + mock_messages_module.AsyncMessages = MagicMock() + mock_completions_module.Completions = MagicMock() + mock_completions_module.AsyncCompletions = MagicMock() + with ( - patch("agentops.instrumentation.anthropic.instrumentor.get_tracer", return_value=mock_tracer), - patch("agentops.instrumentation.anthropic.instrumentor.get_meter", return_value=mock_meter), - patch("agentops.instrumentation.anthropic.instrumentor.unwrap", mock_unwrap), - patch("opentelemetry.instrumentation.utils.unwrap") as mock_otel_unwrap, + patch.dict( + "sys.modules", + { + "anthropic": mock_anthropic, + "anthropic.resources": mock_anthropic.resources, + "anthropic.resources.messages": mock_messages_module, + "anthropic.resources.completions": mock_completions_module, + 
"anthropic.resources.messages.messages": mock_messages_module, + }, + ), + patch("agentops.instrumentation.common.instrumentor.get_tracer", return_value=mock_tracer), + patch("agentops.instrumentation.common.instrumentor.get_meter", return_value=mock_meter), + patch("agentops.instrumentation.common.instrumentor.unwrap", mock_unwrap), # Patch where it's imported + patch( + "agentops.instrumentation.anthropic.instrumentor.otel_unwrap" + ) as mock_otel_unwrap, # Patch in anthropic module + patch("agentops.instrumentation.common.wrappers.wrap_function_wrapper"), + patch("agentops.instrumentation.anthropic.instrumentor.wrap_function_wrapper"), ): + # Instrument first + instrumentor._instrument() + + # Now uninstrument instrumentor._uninstrument() + # Should unwrap all 4 configured methods assert mock_unwrap.call_count == 4 + # Should also unwrap the custom stream methods mock_otel_unwrap.assert_any_call("anthropic.resources.messages.messages", "Messages.stream") mock_otel_unwrap.assert_any_call("anthropic.resources.messages.messages", "AsyncMessages.stream") @@ -76,10 +133,11 @@ def test_instrumentor_handles_missing_methods(mock_tracer, mock_meter): mock_wrap_function = MagicMock(side_effect=AttributeError) with ( - patch("agentops.instrumentation.anthropic.instrumentor.get_tracer", return_value=mock_tracer), - patch("agentops.instrumentation.anthropic.instrumentor.get_meter", return_value=mock_meter), - patch("agentops.instrumentation.anthropic.instrumentor.wrap", mock_wrap), - patch("wrapt.wrap_function_wrapper", mock_wrap_function), + patch("agentops.instrumentation.common.instrumentor.get_tracer", return_value=mock_tracer), + patch("agentops.instrumentation.common.instrumentor.get_meter", return_value=mock_meter), + patch("agentops.instrumentation.common.wrappers.wrap", mock_wrap), + patch("agentops.instrumentation.anthropic.instrumentor.wrap_function_wrapper", mock_wrap_function), ): + # Should not raise exceptions even when wrapping fails instrumentor._instrument() instrumentor._uninstrument() diff --git a/tests/unit/instrumentation/mock_span.py b/tests/unit/instrumentation/mock_span.py index 0ada94922..f14da110c 100644 --- a/tests/unit/instrumentation/mock_span.py +++ b/tests/unit/instrumentation/mock_span.py @@ -208,7 +208,7 @@ def process_with_instrumentor(mock_span, exporter_class, captured_attributes: Di if hasattr(mock_span.span_data, "input"): captured_attributes[WorkflowAttributes.WORKFLOW_INPUT] = mock_span.span_data.input if hasattr(mock_span.span_data, "output"): - captured_attributes[WorkflowAttributes.FINAL_OUTPUT] = mock_span.span_data.output + captured_attributes[WorkflowAttributes.WORKFLOW_FINAL_OUTPUT] = mock_span.span_data.output if hasattr(mock_span.span_data, "tools"): captured_attributes[AgentAttributes.AGENT_TOOLS] = ",".join(mock_span.span_data.tools) if hasattr(mock_span.span_data, "target_agent"): @@ -220,7 +220,7 @@ def process_with_instrumentor(mock_span, exporter_class, captured_attributes: Di if hasattr(mock_span.span_data, "input"): captured_attributes[WorkflowAttributes.WORKFLOW_INPUT] = json.dumps(mock_span.span_data.input) if hasattr(mock_span.span_data, "output"): - captured_attributes[WorkflowAttributes.FINAL_OUTPUT] = json.dumps(mock_span.span_data.output) + captured_attributes[WorkflowAttributes.WORKFLOW_FINAL_OUTPUT] = json.dumps(mock_span.span_data.output) if hasattr(mock_span.span_data, "from_agent"): captured_attributes[AgentAttributes.FROM_AGENT] = mock_span.span_data.from_agent diff --git 
a/tests/unit/instrumentation/openai_agents/test_openai_agents.py b/tests/unit/instrumentation/openai_agents/test_openai_agents.py index dc5ef774f..57421c538 100644 --- a/tests/unit/instrumentation/openai_agents/test_openai_agents.py +++ b/tests/unit/instrumentation/openai_agents/test_openai_agents.py @@ -293,13 +293,13 @@ def test_span_hierarchy_and_attributes(self, instrumentation): # Verify parent span attributes assert parent_captured_attributes[AgentAttributes.AGENT_NAME] == "parent_agent" assert parent_captured_attributes[WorkflowAttributes.WORKFLOW_INPUT] == "parent input" - assert parent_captured_attributes[WorkflowAttributes.FINAL_OUTPUT] == "parent output" + assert parent_captured_attributes[WorkflowAttributes.WORKFLOW_FINAL_OUTPUT] == "parent output" assert parent_captured_attributes[AgentAttributes.AGENT_TOOLS] == '["tool1", "tool2"]' # JSON encoded is fine. # Verify child span attributes assert child_captured_attributes[AgentAttributes.AGENT_NAME] == "child_agent" assert child_captured_attributes[WorkflowAttributes.WORKFLOW_INPUT] == "child input" - assert child_captured_attributes[WorkflowAttributes.FINAL_OUTPUT] == "child output" + assert child_captured_attributes[WorkflowAttributes.WORKFLOW_FINAL_OUTPUT] == "child output" assert child_captured_attributes[AgentAttributes.FROM_AGENT] == "parent_agent" # Verify parent-child relationship @@ -346,7 +346,7 @@ def test_process_agent_span_fixed(self, instrumentation): assert captured_attributes[AgentAttributes.AGENT_NAME] == "test_agent" assert captured_attributes[WorkflowAttributes.WORKFLOW_INPUT] == "What can you help me with?" assert ( - captured_attributes[WorkflowAttributes.FINAL_OUTPUT] + captured_attributes[WorkflowAttributes.WORKFLOW_FINAL_OUTPUT] == "I can help you with finding information, answering questions, and more." 
) assert "search" in captured_attributes[AgentAttributes.AGENT_TOOLS] @@ -398,9 +398,9 @@ def test_process_function_span(self, instrumentation): assert captured_attributes[WorkflowAttributes.WORKFLOW_INPUT] is not None assert "New York" in captured_attributes[WorkflowAttributes.WORKFLOW_INPUT] assert "Boston" in captured_attributes[WorkflowAttributes.WORKFLOW_INPUT] - assert captured_attributes[WorkflowAttributes.FINAL_OUTPUT] is not None - assert "215" in captured_attributes[WorkflowAttributes.FINAL_OUTPUT] - assert "miles" in captured_attributes[WorkflowAttributes.FINAL_OUTPUT] + assert captured_attributes[WorkflowAttributes.WORKFLOW_FINAL_OUTPUT] is not None + assert "215" in captured_attributes[WorkflowAttributes.WORKFLOW_FINAL_OUTPUT] + assert "miles" in captured_attributes[WorkflowAttributes.WORKFLOW_FINAL_OUTPUT] assert captured_attributes[AgentAttributes.FROM_AGENT] == "navigator" # Verify function attributes - don't test for a specific type field diff --git a/tests/unit/instrumentation/openai_core/test_instrumentor.py b/tests/unit/instrumentation/openai_core/test_instrumentor.py index 161576cec..1bc04fa70 100644 --- a/tests/unit/instrumentation/openai_core/test_instrumentor.py +++ b/tests/unit/instrumentation/openai_core/test_instrumentor.py @@ -38,34 +38,47 @@ class TestOpenAIInstrumentor: @pytest.fixture def instrumentor(self): """Set up OpenAI instrumentor for tests""" - # Create a real instrumentation setup for testing - mock_tracer_provider = MagicMock() - instrumentor = OpenAIInstrumentor() + # Create patches for tracer and meter + with patch("agentops.instrumentation.common.instrumentor.get_tracer") as mock_get_tracer: + with patch("agentops.instrumentation.common.instrumentor.get_meter") as mock_get_meter: + # Set up mock tracer and meter + mock_tracer = MagicMock() + mock_meter = MagicMock() + mock_get_tracer.return_value = mock_tracer + mock_get_meter.return_value = mock_meter + + # Create a real instrumentation setup for testing + mock_tracer_provider = MagicMock() + instrumentor = OpenAIInstrumentor() + + # To avoid timing issues with the fixture, we need to ensure patch + # objects are created before being used in the test + mock_wrap = patch("agentops.instrumentation.common.instrumentor.wrap").start() + mock_unwrap = patch("agentops.instrumentation.common.instrumentor.unwrap").start() + mock_instrument = patch.object(instrumentor, "_instrument", wraps=instrumentor._instrument).start() + mock_uninstrument = patch.object( + instrumentor, "_uninstrument", wraps=instrumentor._uninstrument + ).start() + + # Instrument + instrumentor._instrument(tracer_provider=mock_tracer_provider) + + yield { + "instrumentor": instrumentor, + "tracer_provider": mock_tracer_provider, + "mock_wrap": mock_wrap, + "mock_unwrap": mock_unwrap, + "mock_instrument": mock_instrument, + "mock_uninstrument": mock_uninstrument, + "mock_tracer": mock_tracer, + "mock_meter": mock_meter, + } + + # Uninstrument - must happen before stopping patches + instrumentor._uninstrument() - # To avoid timing issues with the fixture, we need to ensure patch - # objects are created before being used in the test - mock_wrap = patch("agentops.instrumentation.common.wrappers.wrap").start() - mock_unwrap = patch("agentops.instrumentation.common.wrappers.unwrap").start() - mock_instrument = patch.object(instrumentor, "_instrument", wraps=instrumentor._instrument).start() - mock_uninstrument = patch.object(instrumentor, "_uninstrument", wraps=instrumentor._uninstrument).start() - - # Instrument - 
instrumentor._instrument(tracer_provider=mock_tracer_provider) - - yield { - "instrumentor": instrumentor, - "tracer_provider": mock_tracer_provider, - "mock_wrap": mock_wrap, - "mock_unwrap": mock_unwrap, - "mock_instrument": mock_instrument, - "mock_uninstrument": mock_uninstrument, - } - - # Uninstrument - must happen before stopping patches - instrumentor._uninstrument() - - # Stop patches - patch.stopall() + # Stop patches + patch.stopall() def test_instrumentor_initialization(self): """Test instrumentor is initialized with correct configuration""" From 8732beb96be731cec2f9fef7445642dbca8acf0e Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Wed, 18 Jun 2025 05:54:44 +0530 Subject: [PATCH 2/8] refactor: centralize library version management across instrumentation modules --- agentops/instrumentation/__init__.py | 10 +-- agentops/instrumentation/ag2/__init__.py | 20 ++---- agentops/instrumentation/agno/__init__.py | 9 +-- .../instrumentation/anthropic/__init__.py | 28 ++------ agentops/instrumentation/common/__init__.py | 7 ++ .../instrumentation/common/span_management.py | 10 +-- agentops/instrumentation/common/version.py | 71 +++++++++++++++++++ .../instrumentation/google_adk/__init__.py | 15 ++-- .../instrumentation/google_genai/__init__.py | 28 ++------ .../ibm_watsonx_ai/__init__.py | 24 +++---- agentops/instrumentation/mem0/__init__.py | 20 ++---- agentops/instrumentation/openai/__init__.py | 20 ++---- .../instrumentation/openai_agents/__init__.py | 20 ++---- .../openai_agents/attributes/common.py | 2 +- .../instrumentation/smolagents/__init__.py | 8 ++- agentops/semconv/agent.py | 1 - agentops/semconv/core.py | 7 -- agentops/semconv/instrumentation.py | 1 - agentops/semconv/span_kinds.py | 50 +++++++------ agentops/semconv/workflow.py | 7 +- 20 files changed, 181 insertions(+), 177 deletions(-) create mode 100644 agentops/instrumentation/common/version.py diff --git a/agentops/instrumentation/__init__.py b/agentops/instrumentation/__init__.py index 11f7b6331..e902262df 100644 --- a/agentops/instrumentation/__init__.py +++ b/agentops/instrumentation/__init__.py @@ -22,7 +22,6 @@ from dataclasses import dataclass import importlib import sys -from importlib.metadata import version from packaging.version import Version, parse import builtins @@ -34,6 +33,7 @@ from agentops.logging import logger from agentops.sdk.core import tracer +from agentops.instrumentation.common import get_library_version # Define the structure for instrumentor configurations @@ -456,9 +456,11 @@ def should_activate(self) -> bool: provider_name = self.package_name else: provider_name = self.module_name.split(".")[-1] - module_version = version(provider_name) - return module_version is not None and Version(module_version) >= parse(self.min_version) - except ImportError: + + # Use common version utility + module_version = get_library_version(provider_name) + return module_version != "unknown" and Version(module_version) >= parse(self.min_version) + except Exception: return False def get_instance(self) -> BaseInstrumentor: diff --git a/agentops/instrumentation/ag2/__init__.py b/agentops/instrumentation/ag2/__init__.py index 876550056..ddfc1c8a9 100644 --- a/agentops/instrumentation/ag2/__init__.py +++ b/agentops/instrumentation/ag2/__init__.py @@ -5,22 +5,12 @@ than individual message exchanges. 
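[Editor's note on the `should_activate` hunk above: the gate now goes through the shared `get_library_version` helper, which reports the literal string "unknown" when a package cannot be resolved. A minimal sketch of the resulting behavior, for illustration only; the helper name and the "unknown" sentinel are taken from later hunks in this series, and the standalone function below is not itself part of the patch:]

```python
from packaging.version import Version, parse


def activation_gate(installed_version: str, min_version: str) -> bool:
    """Mirror of the new should_activate logic: an unresolved version never
    activates, and anything below the configured minimum is skipped."""
    return installed_version != "unknown" and Version(installed_version) >= parse(min_version)


assert activation_gate("1.2.0", "1.0.0")
assert not activation_gate("0.9.0", "1.0.0")
assert not activation_gate("unknown", "1.0.0")
```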
""" -from agentops.logging import logger +from agentops.instrumentation.common import LibraryInfo - -def get_version() -> str: - """Get the version of the AG2 package, or 'unknown' if not found""" - try: - from importlib.metadata import version - - return version("ag2") - except ImportError: - logger.debug("Could not find AG2 version") - return "unknown" - - -LIBRARY_NAME = "ag2" -LIBRARY_VERSION: str = get_version() +# Library information +_library_info = LibraryInfo(name="ag2") +LIBRARY_NAME = _library_info.name +LIBRARY_VERSION = _library_info.version # Import after defining constants to avoid circular imports from agentops.instrumentation.ag2.instrumentor import AG2Instrumentor # noqa: E402 diff --git a/agentops/instrumentation/agno/__init__.py b/agentops/instrumentation/agno/__init__.py index c6c04a7fc..4d56ff728 100644 --- a/agentops/instrumentation/agno/__init__.py +++ b/agentops/instrumentation/agno/__init__.py @@ -1,15 +1,16 @@ """Agno Agent instrumentation package.""" import logging +from agentops.instrumentation.common import LibraryInfo from .instrumentor import AgnoInstrumentor logger = logging.getLogger(__name__) -__version__ = "1.0.0" - -LIBRARY_NAME = "agno" -LIBRARY_VERSION = __version__ +# Library information +_library_info = LibraryInfo(name="agno") +LIBRARY_NAME = _library_info.name +LIBRARY_VERSION = _library_info.version __all__ = [ "AgnoInstrumentor", diff --git a/agentops/instrumentation/anthropic/__init__.py b/agentops/instrumentation/anthropic/__init__.py index e8582834f..47b4b758a 100644 --- a/agentops/instrumentation/anthropic/__init__.py +++ b/agentops/instrumentation/anthropic/__init__.py @@ -5,31 +5,15 @@ """ import logging - - -def get_version() -> str: - """Get the version of the Anthropic SDK, or 'unknown' if not found - - Attempts to retrieve the installed version of the Anthropic SDK using importlib.metadata. - Falls back to 'unknown' if the version cannot be determined. 
- - Returns: - The version string of the Anthropic SDK or 'unknown' - """ - try: - from importlib.metadata import version - - return version("anthropic") - except ImportError: - logger.debug("Could not find Anthropic SDK version") - return "unknown" - - -LIBRARY_NAME = "anthropic" -LIBRARY_VERSION: str = get_version() +from agentops.instrumentation.common import LibraryInfo logger = logging.getLogger(__name__) +# Library information +_library_info = LibraryInfo(name="anthropic") +LIBRARY_NAME = _library_info.name +LIBRARY_VERSION = _library_info.version + # Import after defining constants to avoid circular imports from agentops.instrumentation.anthropic.instrumentor import AnthropicInstrumentor # noqa: E402 diff --git a/agentops/instrumentation/common/__init__.py b/agentops/instrumentation/common/__init__.py index 9b734c320..81bd8f162 100644 --- a/agentops/instrumentation/common/__init__.py +++ b/agentops/instrumentation/common/__init__.py @@ -35,6 +35,10 @@ create_stream_wrapper_factory, StreamingResponseHandler, ) +from agentops.instrumentation.common.version import ( + get_library_version, + LibraryInfo, +) __all__ = [ # Attributes @@ -72,4 +76,7 @@ "AsyncStreamWrapper", "create_stream_wrapper_factory", "StreamingResponseHandler", + # Version + "get_library_version", + "LibraryInfo", ] diff --git a/agentops/instrumentation/common/span_management.py b/agentops/instrumentation/common/span_management.py index 9d3f4b26d..ac0d323e8 100644 --- a/agentops/instrumentation/common/span_management.py +++ b/agentops/instrumentation/common/span_management.py @@ -15,7 +15,6 @@ from agentops.logging import logger from agentops.semconv import CoreAttributes -from agentops import get_client class SpanAttributeManager: @@ -33,9 +32,12 @@ def set_common_attributes(self, span: Span): def set_config_tags(self, span: Span): """Set tags from AgentOps config on a span.""" - config = get_client().config - if config.default_tags and len(config.default_tags) > 0: - tag_list = list(config.default_tags) + # Import locally to avoid circular dependency + from agentops import get_client + + client = get_client() + if client and client.config.default_tags and len(client.config.default_tags) > 0: + tag_list = list(client.config.default_tags) span.set_attribute(CoreAttributes.TAGS, tag_list) diff --git a/agentops/instrumentation/common/version.py b/agentops/instrumentation/common/version.py new file mode 100644 index 000000000..a9a57b7c4 --- /dev/null +++ b/agentops/instrumentation/common/version.py @@ -0,0 +1,71 @@ +"""Version utilities for AgentOps instrumentation. + +This module provides common functionality for retrieving and managing +library versions across all instrumentation modules. +""" + +import logging +from typing import Optional + +logger = logging.getLogger(__name__) + + +def get_library_version(package_name: str, default_version: str = "unknown") -> str: + """Get the version of a library package. + + Attempts to retrieve the installed version of a package using importlib.metadata. + Falls back to the default version if the version cannot be determined. 
+ + Args: + package_name: The name of the package to get the version for (as used in pip/importlib.metadata) + default_version: The default version to return if the package version cannot be found + + Returns: + The version string of the package or the default version + + Examples: + >>> get_library_version("openai") + "1.0.0" + + >>> get_library_version("nonexistent-package") + "unknown" + + >>> get_library_version("ibm-watsonx-ai", "1.3.11") + "1.3.11" # If not found + """ + try: + from importlib.metadata import version + + return version(package_name) + except (ImportError, Exception) as e: + logger.debug(f"Could not find {package_name} version: {e}") + return default_version + + +class LibraryInfo: + """Container for library information used in instrumentation. + + This class provides a standardized way to store and access library + information (name and version) across all instrumentors. + + Attributes: + name: The library name used for identification + version: The library version string + package_name: The package name used in pip/importlib.metadata (optional) + """ + + def __init__(self, name: str, package_name: Optional[str] = None, default_version: str = "unknown"): + """Initialize library information. + + Args: + name: The library name used for identification + package_name: The package name used in pip/importlib.metadata. + If not provided, uses the library name. + default_version: Default version if package version cannot be determined + """ + self.name = name + self.package_name = package_name or name + self.version = get_library_version(self.package_name, default_version) + + def __repr__(self) -> str: + return f"LibraryInfo(name={self.name!r}, version={self.version!r})" diff --git a/agentops/instrumentation/google_adk/__init__.py b/agentops/instrumentation/google_adk/__init__.py index ac8bcd215..f2d9e9542 100644 --- a/agentops/instrumentation/google_adk/__init__.py +++ b/agentops/instrumentation/google_adk/__init__.py @@ -4,15 +4,14 @@ capturing agent execution, LLM calls, tool calls, and other ADK-specific events. """ -from importlib.metadata import version, PackageNotFoundError +from agentops.instrumentation.common import LibraryInfo -try: - __version__ = version("google-adk") -except PackageNotFoundError: - __version__ = "0.0.0" - -LIBRARY_NAME = "agentops.instrumentation.google_adk" -LIBRARY_VERSION = __version__ +# Library information +_library_info = LibraryInfo( + name="agentops.instrumentation.google_adk", package_name="google-adk", default_version="0.0.0" +) +LIBRARY_NAME = _library_info.name +LIBRARY_VERSION = _library_info.version from agentops.instrumentation.google_adk.instrumentor import GoogleADKInstrumentor # noqa: E402 from agentops.instrumentation.google_adk import patch # noqa: E402 diff --git a/agentops/instrumentation/google_genai/__init__.py b/agentops/instrumentation/google_genai/__init__.py index 6a7ee24fa..395811d34 100644 --- a/agentops/instrumentation/google_genai/__init__.py +++ b/agentops/instrumentation/google_genai/__init__.py @@ -5,31 +5,15 @@ """ import logging - - -def get_version() -> str: - """Get the version of the Google Generative AI SDK, or 'unknown' if not found - - Attempts to retrieve the installed version of the Google Generative AI SDK using importlib.metadata. - Falls back to 'unknown' if the version cannot be determined. 
- - Returns: - The version string of the Google Generative AI SDK or 'unknown' - """ - try: - from importlib.metadata import version - - return version("google-genai") - except ImportError: - logger.debug("Could not find Google Generative AI SDK version") - return "unknown" - - -LIBRARY_NAME = "google-genai" -LIBRARY_VERSION: str = get_version() +from agentops.instrumentation.common import LibraryInfo logger = logging.getLogger(__name__) +# Library information +_library_info = LibraryInfo(name="google-genai") +LIBRARY_NAME = _library_info.name +LIBRARY_VERSION = _library_info.version + # Import after defining constants to avoid circular imports from agentops.instrumentation.google_genai.instrumentor import GoogleGenAIInstrumentor # noqa: E402 diff --git a/agentops/instrumentation/ibm_watsonx_ai/__init__.py b/agentops/instrumentation/ibm_watsonx_ai/__init__.py index a5eaee1a7..56eeb9def 100644 --- a/agentops/instrumentation/ibm_watsonx_ai/__init__.py +++ b/agentops/instrumentation/ibm_watsonx_ai/__init__.py @@ -5,24 +5,18 @@ """ import logging +from agentops.instrumentation.common import LibraryInfo logger = logging.getLogger(__name__) - -def get_version() -> str: - """Get the version of the IBM watsonx.ai SDK, or 'unknown' if not found.""" - try: - from importlib.metadata import version - - return version("ibm-watsonx-ai") - except ImportError: - logger.debug("Could not find IBM WatsonX AI SDK version") - return "1.3.11" # Default to known supported version if not found - - -# Library identification for instrumentation -LIBRARY_NAME = "ibm_watsonx_ai" -LIBRARY_VERSION = get_version() +# Library information +_library_info = LibraryInfo( + name="ibm_watsonx_ai", + package_name="ibm-watsonx-ai", + default_version="1.3.11", # Default to known supported version if not found +) +LIBRARY_NAME = _library_info.name +LIBRARY_VERSION = _library_info.version # Import after defining constants to avoid circular imports from agentops.instrumentation.ibm_watsonx_ai.instrumentor import IBMWatsonXInstrumentor # noqa: E402 diff --git a/agentops/instrumentation/mem0/__init__.py b/agentops/instrumentation/mem0/__init__.py index ababf20c9..2a8309abb 100644 --- a/agentops/instrumentation/mem0/__init__.py +++ b/agentops/instrumentation/mem0/__init__.py @@ -5,6 +5,7 @@ """ import logging +from agentops.instrumentation.common import LibraryInfo # Import memory operation wrappers from .memory import ( @@ -18,22 +19,13 @@ mem0_history_wrapper, ) - -def get_version() -> str: - try: - from importlib.metadata import version - - return version("mem0ai") - except ImportError: - logger.debug("Could not find Mem0 SDK version") - return "unknown" - - -LIBRARY_NAME = "agentops.instrumentation.mem0" -LIBRARY_VERSION = "1.0.0" - logger = logging.getLogger(__name__) +# Library information +_library_info = LibraryInfo(name="agentops.instrumentation.mem0", package_name="mem0ai") +LIBRARY_NAME = _library_info.name +LIBRARY_VERSION = "1.0.0" # Internal version for instrumentation + # Import after defining constants to avoid circular imports from agentops.instrumentation.mem0.instrumentor import Mem0Instrumentor # noqa: E402 diff --git a/agentops/instrumentation/openai/__init__.py b/agentops/instrumentation/openai/__init__.py index b31a32645..3e160530b 100644 --- a/agentops/instrumentation/openai/__init__.py +++ b/agentops/instrumentation/openai/__init__.py @@ -4,22 +4,12 @@ extending the third-party instrumentation to add support for OpenAI responses. 
""" -from agentops.logging import logger +from agentops.instrumentation.common import LibraryInfo - -def get_version() -> str: - """Get the version of the agents SDK, or 'unknown' if not found""" - try: - from importlib.metadata import version - - return version("openai") - except ImportError: - logger.debug("Could not find OpenAI Agents SDK version") - return "unknown" - - -LIBRARY_NAME = "openai" -LIBRARY_VERSION: str = get_version() +# Library information +_library_info = LibraryInfo(name="openai") +LIBRARY_NAME = _library_info.name +LIBRARY_VERSION = _library_info.version # Import after defining constants to avoid circular imports from agentops.instrumentation.openai.instrumentor import OpenAIInstrumentor # noqa: E402 diff --git a/agentops/instrumentation/openai_agents/__init__.py b/agentops/instrumentation/openai_agents/__init__.py index 74a819267..9d513971c 100644 --- a/agentops/instrumentation/openai_agents/__init__.py +++ b/agentops/instrumentation/openai_agents/__init__.py @@ -13,22 +13,12 @@ agentops.instrumentation.openai. """ -from agentops.logging import logger +from agentops.instrumentation.common import LibraryInfo - -def get_version() -> str: - """Get the version of the agents SDK, or 'unknown' if not found""" - try: - from importlib.metadata import version - - return version("openai-agents") - except ImportError: - logger.debug("Could not find OpenAI Agents SDK version") - return "unknown" - - -LIBRARY_NAME = "openai-agents" -LIBRARY_VERSION: str = get_version() +# Library information +_library_info = LibraryInfo(name="openai-agents") +LIBRARY_NAME = _library_info.name +LIBRARY_VERSION = _library_info.version # Import after defining constants to avoid circular imports from agentops.instrumentation.openai_agents.instrumentor import OpenAIAgentsInstrumentor # noqa: E402 diff --git a/agentops/instrumentation/openai_agents/attributes/common.py b/agentops/instrumentation/openai_agents/attributes/common.py index 0614ef5a5..be64fa3bf 100644 --- a/agentops/instrumentation/openai_agents/attributes/common.py +++ b/agentops/instrumentation/openai_agents/attributes/common.py @@ -233,7 +233,7 @@ def get_function_span_attributes(span_data: Any) -> AttributeMap: pass if hasattr(span_data, "from_agent") and span_data.from_agent: - attributes[f"{AgentAttributes.AGENT}.calling_tool.name"] = str(span_data.from_agent) + attributes["agent.calling_tool.name"] = str(span_data.from_agent) return attributes diff --git a/agentops/instrumentation/smolagents/__init__.py b/agentops/instrumentation/smolagents/__init__.py index 7eeda90f7..31bab7d70 100644 --- a/agentops/instrumentation/smolagents/__init__.py +++ b/agentops/instrumentation/smolagents/__init__.py @@ -1,7 +1,11 @@ """SmoLAgents instrumentation for AgentOps.""" -LIBRARY_NAME = "smolagents" -LIBRARY_VERSION = "1.16.0" +from agentops.instrumentation.common import LibraryInfo + +# Library information +_library_info = LibraryInfo(name="smolagents", default_version="1.16.0") +LIBRARY_NAME = _library_info.name +LIBRARY_VERSION = _library_info.version from agentops.instrumentation.smolagents.instrumentor import SmolAgentsInstrumentor # noqa: E402 diff --git a/agentops/semconv/agent.py b/agentops/semconv/agent.py index e096bd479..59e345253 100644 --- a/agentops/semconv/agent.py +++ b/agentops/semconv/agent.py @@ -8,7 +8,6 @@ class AgentAttributes: AGENT_ID = "agent.id" # Unique identifier for the agent AGENT_NAME = "agent.name" # Name of the agent AGENT_ROLE = "agent.role" # Role of the agent - AGENT = "agent" # Root prefix for agent attributes # 
Capabilities AGENT_TOOLS = "agent.tools" # Tools available to the agent diff --git a/agentops/semconv/core.py b/agentops/semconv/core.py index d6b6d9022..5d0383102 100644 --- a/agentops/semconv/core.py +++ b/agentops/semconv/core.py @@ -8,19 +8,12 @@ class CoreAttributes: ERROR_TYPE = "error.type" # Type of error if status is error ERROR_MESSAGE = "error.message" # Error message if status is error - IN_FLIGHT = "agentops.in-flight" # Whether the span is in-flight - EXPORT_IMMEDIATELY = "agentops.export.immediate" # Whether the span should be exported immediately - TAGS = "agentops.tags" # Tags passed to agentops.init # Trace context attributes TRACE_ID = "trace.id" # Trace ID SPAN_ID = "span.id" # Span ID PARENT_ID = "parent.id" # Parent ID - PARENT_SPAN_ID = "parent.span.id" # Parent span ID - PARENT_TRACE_ID = "parent.trace.id" # Parent trace ID - PARENT_SPAN_KIND = "parent.span.kind" # Parent span kind - PARENT_SPAN_NAME = "parent.span.name" # Parent span name GROUP_ID = "group.id" # Group ID # Note: WORKFLOW_NAME is defined in WorkflowAttributes to avoid duplication diff --git a/agentops/semconv/instrumentation.py b/agentops/semconv/instrumentation.py index 5fb672c75..794c05ffe 100644 --- a/agentops/semconv/instrumentation.py +++ b/agentops/semconv/instrumentation.py @@ -11,4 +11,3 @@ class InstrumentationAttributes: LIBRARY_VERSION = "library.version" # Version of the library INSTRUMENTATION_TYPE = "instrumentation.type" # Type of instrumentation - INSTRUMENTATION_PROVIDER = "instrumentation.provider" # Provider of the instrumentation diff --git a/agentops/semconv/span_kinds.py b/agentops/semconv/span_kinds.py index 5a75f6d76..75b3c6b97 100644 --- a/agentops/semconv/span_kinds.py +++ b/agentops/semconv/span_kinds.py @@ -3,19 +3,9 @@ from enum import Enum -class SpanKind: - """Defines the kinds of spans in AgentOps.""" - - # Agent action kinds - AGENT_ACTION = "agent.action" # Agent performing an action - AGENT_THINKING = "agent.thinking" # Agent reasoning/planning - AGENT_DECISION = "agent.decision" # Agent making a decision - - # LLM interaction kinds - LLM_CALL = "llm.call" # LLM API call +class AgentOpsSpanKindValues(Enum): + """Standard span kind values for AgentOps.""" - # Workflow kinds - WORKFLOW_STEP = "workflow.step" # Step in a workflow WORKFLOW = "workflow" SESSION = "session" TASK = "task" @@ -24,17 +14,35 @@ class SpanKind: TOOL = "tool" LLM = "llm" TEAM = "team" - UNKNOWN = "unknown" CHAIN = "chain" TEXT = "text" GUARDRAIL = "guardrail" + UNKNOWN = "unknown" -class AgentOpsSpanKindValues(Enum): - WORKFLOW = "workflow" - TASK = "task" - AGENT = "agent" - TOOL = "tool" - LLM = "llm" - TEAM = "team" - UNKNOWN = "unknown" +# Legacy SpanKind class for backward compatibility +class SpanKind: + """Legacy span kind definitions - use AgentOpsSpanKindValues instead.""" + + # Agent action kinds + AGENT_ACTION = "agent.action" # Agent performing an action + AGENT_THINKING = "agent.thinking" # Agent reasoning/planning + AGENT_DECISION = "agent.decision" # Agent making a decision + + # LLM interaction kinds + LLM_CALL = "llm.call" # LLM API call + + # Workflow kinds + WORKFLOW_STEP = "workflow.step" # Step in a workflow + WORKFLOW = AgentOpsSpanKindValues.WORKFLOW.value + SESSION = AgentOpsSpanKindValues.SESSION.value + TASK = AgentOpsSpanKindValues.TASK.value + OPERATION = AgentOpsSpanKindValues.OPERATION.value + AGENT = AgentOpsSpanKindValues.AGENT.value + TOOL = AgentOpsSpanKindValues.TOOL.value + LLM = AgentOpsSpanKindValues.LLM.value + TEAM = AgentOpsSpanKindValues.TEAM.value + 
UNKNOWN = AgentOpsSpanKindValues.UNKNOWN.value + CHAIN = AgentOpsSpanKindValues.CHAIN.value + TEXT = AgentOpsSpanKindValues.TEXT.value + GUARDRAIL = AgentOpsSpanKindValues.GUARDRAIL.value diff --git a/agentops/semconv/workflow.py b/agentops/semconv/workflow.py index 2bc46e0a7..d0d506d8d 100644 --- a/agentops/semconv/workflow.py +++ b/agentops/semconv/workflow.py @@ -18,14 +18,9 @@ class WorkflowAttributes: WORKFLOW_OUTPUT_TYPE = "workflow.output.type" # Type of output from the workflow WORKFLOW_FINAL_OUTPUT = "workflow.final_output" # Final output of the workflow - # Workflow step attributes - WORKFLOW_STEP = "workflow.step" + # Workflow step attributes (only keep used ones) WORKFLOW_STEP_TYPE = "workflow.step.type" # Type of workflow step - WORKFLOW_STEP_NAME = "workflow.step.name" # Name of the workflow step - WORKFLOW_STEP_INPUT = "workflow.step.input" # Input to the workflow step - WORKFLOW_STEP_OUTPUT = "workflow.step.output" # Output from the workflow step WORKFLOW_STEP_STATUS = "workflow.step.status" # Status of the workflow step - WORKFLOW_STEP_ERROR = "workflow.step.error" # Error from the workflow step # Configuration WORKFLOW_MAX_TURNS = "workflow.max_turns" # Maximum number of turns in a workflow From 596e979d87afee92d1bc7f5f973dee9d5cf90c98 Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Wed, 18 Jun 2025 15:36:47 +0530 Subject: [PATCH 3/8] Cleanup --- agentops/instrumentation/__init__.py | 34 ++++--- .../{ => agentic}/ag2/__init__.py | 2 +- .../{ => agentic}/ag2/instrumentor.py | 2 +- .../{ => agentic}/agno/__init__.py | 0 .../{ => agentic}/agno/attributes/__init__.py | 0 .../{ => agentic}/agno/attributes/agent.py | 0 .../{ => agentic}/agno/attributes/metrics.py | 0 .../{ => agentic}/agno/attributes/team.py | 0 .../{ => agentic}/agno/attributes/tool.py | 0 .../{ => agentic}/agno/attributes/workflow.py | 0 .../{ => agentic}/agno/instrumentor.py | 50 +++++------ .../{ => agentic}/crewai/LICENSE | 0 .../{ => agentic}/crewai/NOTICE.md | 0 .../agentic/crewai/__init__.py | 6 ++ .../crewai/crewai_span_attributes.py | 0 .../{ => agentic}/crewai/instrumentation.py | 4 +- .../{ => agentic}/crewai/version.py | 0 .../{ => agentic}/google_adk/__init__.py | 4 +- .../agentic/google_adk/instrumentor.py | 68 ++++++++++++++ .../{ => agentic}/google_adk/patch.py | 0 .../{ => agentic}/openai_agents/README.md | 0 .../{ => agentic}/openai_agents/SPANS.md | 0 .../openai_agents/TRACING_API.md | 0 .../{ => agentic}/openai_agents/__init__.py | 2 +- .../openai_agents/attributes/__init__.py | 0 .../openai_agents/attributes/common.py | 8 +- .../openai_agents/attributes/completion.py | 2 +- .../openai_agents/attributes/model.py | 0 .../openai_agents/attributes/tokens.py | 0 .../{ => agentic}/openai_agents/exporter.py | 4 +- .../openai_agents/instrumentor.py | 6 +- .../{ => agentic}/openai_agents/processor.py | 0 .../{ => agentic}/smolagents/README.md | 0 .../{ => agentic}/smolagents/__init__.py | 2 +- .../smolagents/attributes/agent.py | 0 .../smolagents/attributes/model.py | 0 .../{ => agentic}/smolagents/instrumentor.py | 73 +++++++-------- .../smolagents/stream_wrapper.py | 0 agentops/instrumentation/crewai/__init__.py | 6 -- .../google_adk/instrumentor.py | 88 ------------------- .../{ => providers}/anthropic/__init__.py | 2 +- .../anthropic/attributes/__init__.py | 9 +- .../anthropic/attributes/common.py | 2 +- .../anthropic/attributes/message.py | 4 +- .../anthropic/attributes/tools.py | 0 .../anthropic/event_handler_wrapper.py | 0 .../{ => providers}/anthropic/instrumentor.py | 9 +- 
.../anthropic/stream_wrapper.py | 4 +- .../{ => providers}/google_genai/README.md | 0 .../{ => providers}/google_genai/__init__.py | 2 +- .../google_genai/attributes/__init__.py | 6 +- .../google_genai/attributes/chat.py | 4 +- .../google_genai/attributes/common.py | 2 +- .../google_genai/attributes/model.py | 2 +- .../google_genai/instrumentor.py | 60 +++++-------- .../google_genai/stream_wrapper.py | 4 +- .../ibm_watsonx_ai/__init__.py | 2 +- .../ibm_watsonx_ai/attributes/__init__.py | 4 +- .../ibm_watsonx_ai/attributes/attributes.py | 2 +- .../ibm_watsonx_ai/attributes/common.py | 0 .../ibm_watsonx_ai/instrumentor.py | 58 ++++++------ .../ibm_watsonx_ai/stream_wrapper.py | 4 +- .../{ => providers}/mem0/__init__.py | 2 +- .../{ => providers}/mem0/common.py | 0 .../{ => providers}/mem0/instrumentor.py | 81 ++++++----------- .../{ => providers}/mem0/memory.py | 0 .../{ => providers}/openai/__init__.py | 2 +- .../openai/attributes/__init__.py | 0 .../openai/attributes/common.py | 4 +- .../openai/attributes/response.py | 0 .../openai/attributes/tools.py | 0 .../{ => providers}/openai/config.py | 0 .../{ => providers}/openai/instrumentor.py | 12 +-- .../{ => providers}/openai/utils.py | 2 +- .../{ => providers}/openai/v0.py | 6 +- .../{ => providers}/openai/v0_wrappers.py | 4 +- .../openai/wrappers/__init__.py | 10 +-- .../openai/wrappers/assistant.py | 6 +- .../{ => providers}/openai/wrappers/chat.py | 4 +- .../openai/wrappers/completion.py | 4 +- .../openai/wrappers/embeddings.py | 4 +- .../openai/wrappers/image_gen.py | 2 +- .../{ => providers}/openai/wrappers/shared.py | 4 +- .../concurrent_futures/__init__.py | 0 .../concurrent_futures/instrumentation.py | 0 examples/anthropic/anthropic-example-async.py | 2 +- examples/google_adk/human_approval.py | 3 +- examples/openai/openai_example_async.py | 3 +- examples/openai_agents/agent_patterns.py | 4 +- .../smolagents/multi_smolagents_system.py | 4 +- .../anthropic/test_attributes.py | 6 +- .../anthropic/test_event_handler.py | 2 +- .../anthropic/test_instrumentor.py | 12 +-- .../anthropic/test_stream_wrapper.py | 2 +- .../openai_agents/test_openai_agents.py | 14 +-- .../test_openai_agents_attributes.py | 16 ++-- .../openai_core/test_common_attributes.py | 12 +-- .../openai_core/test_instrumentor.py | 2 +- .../openai_core/test_response_attributes.py | 75 ++++++++++------ 99 files changed, 412 insertions(+), 433 deletions(-) rename agentops/instrumentation/{ => agentic}/ag2/__init__.py (86%) rename agentops/instrumentation/{ => agentic}/ag2/instrumentor.py (99%) rename agentops/instrumentation/{ => agentic}/agno/__init__.py (100%) rename agentops/instrumentation/{ => agentic}/agno/attributes/__init__.py (100%) rename agentops/instrumentation/{ => agentic}/agno/attributes/agent.py (100%) rename agentops/instrumentation/{ => agentic}/agno/attributes/metrics.py (100%) rename agentops/instrumentation/{ => agentic}/agno/attributes/team.py (100%) rename agentops/instrumentation/{ => agentic}/agno/attributes/tool.py (100%) rename agentops/instrumentation/{ => agentic}/agno/attributes/workflow.py (100%) rename agentops/instrumentation/{ => agentic}/agno/instrumentor.py (97%) rename agentops/instrumentation/{ => agentic}/crewai/LICENSE (100%) rename agentops/instrumentation/{ => agentic}/crewai/NOTICE.md (100%) create mode 100644 agentops/instrumentation/agentic/crewai/__init__.py rename agentops/instrumentation/{ => agentic}/crewai/crewai_span_attributes.py (100%) rename agentops/instrumentation/{ => agentic}/crewai/instrumentation.py (99%) rename 
agentops/instrumentation/{ => agentic}/crewai/version.py (100%) rename agentops/instrumentation/{ => agentic}/google_adk/__init__.py (76%) create mode 100644 agentops/instrumentation/agentic/google_adk/instrumentor.py rename agentops/instrumentation/{ => agentic}/google_adk/patch.py (100%) rename agentops/instrumentation/{ => agentic}/openai_agents/README.md (100%) rename agentops/instrumentation/{ => agentic}/openai_agents/SPANS.md (100%) rename agentops/instrumentation/{ => agentic}/openai_agents/TRACING_API.md (100%) rename agentops/instrumentation/{ => agentic}/openai_agents/__init__.py (90%) rename agentops/instrumentation/{ => agentic}/openai_agents/attributes/__init__.py (100%) rename agentops/instrumentation/{ => agentic}/openai_agents/attributes/common.py (98%) rename agentops/instrumentation/{ => agentic}/openai_agents/attributes/completion.py (98%) rename agentops/instrumentation/{ => agentic}/openai_agents/attributes/model.py (100%) rename agentops/instrumentation/{ => agentic}/openai_agents/attributes/tokens.py (100%) rename agentops/instrumentation/{ => agentic}/openai_agents/exporter.py (99%) rename agentops/instrumentation/{ => agentic}/openai_agents/instrumentor.py (94%) rename agentops/instrumentation/{ => agentic}/openai_agents/processor.py (100%) rename agentops/instrumentation/{ => agentic}/smolagents/README.md (100%) rename agentops/instrumentation/{ => agentic}/smolagents/__init__.py (74%) rename agentops/instrumentation/{ => agentic}/smolagents/attributes/agent.py (100%) rename agentops/instrumentation/{ => agentic}/smolagents/attributes/model.py (100%) rename agentops/instrumentation/{ => agentic}/smolagents/instrumentor.py (83%) rename agentops/instrumentation/{ => agentic}/smolagents/stream_wrapper.py (100%) delete mode 100644 agentops/instrumentation/crewai/__init__.py delete mode 100644 agentops/instrumentation/google_adk/instrumentor.py rename agentops/instrumentation/{ => providers}/anthropic/__init__.py (84%) rename agentops/instrumentation/{ => providers}/anthropic/attributes/__init__.py (55%) rename agentops/instrumentation/{ => providers}/anthropic/attributes/common.py (95%) rename agentops/instrumentation/{ => providers}/anthropic/attributes/message.py (99%) rename agentops/instrumentation/{ => providers}/anthropic/attributes/tools.py (100%) rename agentops/instrumentation/{ => providers}/anthropic/event_handler_wrapper.py (100%) rename agentops/instrumentation/{ => providers}/anthropic/instrumentor.py (94%) rename agentops/instrumentation/{ => providers}/anthropic/stream_wrapper.py (99%) rename agentops/instrumentation/{ => providers}/google_genai/README.md (100%) rename agentops/instrumentation/{ => providers}/google_genai/__init__.py (84%) rename agentops/instrumentation/{ => providers}/google_genai/attributes/__init__.py (70%) rename agentops/instrumentation/{ => providers}/google_genai/attributes/chat.py (96%) rename agentops/instrumentation/{ => providers}/google_genai/attributes/common.py (97%) rename agentops/instrumentation/{ => providers}/google_genai/attributes/model.py (99%) rename agentops/instrumentation/{ => providers}/google_genai/instrumentor.py (74%) rename agentops/instrumentation/{ => providers}/google_genai/stream_wrapper.py (98%) rename agentops/instrumentation/{ => providers}/ibm_watsonx_ai/__init__.py (87%) rename agentops/instrumentation/{ => providers}/ibm_watsonx_ai/attributes/__init__.py (79%) rename agentops/instrumentation/{ => providers}/ibm_watsonx_ai/attributes/attributes.py (99%) rename agentops/instrumentation/{ => 
providers}/ibm_watsonx_ai/attributes/common.py (100%) rename agentops/instrumentation/{ => providers}/ibm_watsonx_ai/instrumentor.py (77%) rename agentops/instrumentation/{ => providers}/ibm_watsonx_ai/stream_wrapper.py (99%) rename agentops/instrumentation/{ => providers}/mem0/__init__.py (92%) rename agentops/instrumentation/{ => providers}/mem0/common.py (100%) rename agentops/instrumentation/{ => providers}/mem0/instrumentor.py (77%) rename agentops/instrumentation/{ => providers}/mem0/memory.py (100%) rename agentops/instrumentation/{ => providers}/openai/__init__.py (84%) rename agentops/instrumentation/{ => providers}/openai/attributes/__init__.py (100%) rename agentops/instrumentation/{ => providers}/openai/attributes/common.py (92%) rename agentops/instrumentation/{ => providers}/openai/attributes/response.py (100%) rename agentops/instrumentation/{ => providers}/openai/attributes/tools.py (100%) rename agentops/instrumentation/{ => providers}/openai/config.py (100%) rename agentops/instrumentation/{ => providers}/openai/instrumentor.py (95%) rename agentops/instrumentation/{ => providers}/openai/utils.py (93%) rename agentops/instrumentation/{ => providers}/openai/v0.py (96%) rename agentops/instrumentation/{ => providers}/openai/v0_wrappers.py (99%) rename agentops/instrumentation/{ => providers}/openai/wrappers/__init__.py (56%) rename agentops/instrumentation/{ => providers}/openai/wrappers/assistant.py (98%) rename agentops/instrumentation/{ => providers}/openai/wrappers/chat.py (98%) rename agentops/instrumentation/{ => providers}/openai/wrappers/completion.py (96%) rename agentops/instrumentation/{ => providers}/openai/wrappers/embeddings.py (96%) rename agentops/instrumentation/{ => providers}/openai/wrappers/image_gen.py (96%) rename agentops/instrumentation/{ => providers}/openai/wrappers/shared.py (93%) rename agentops/instrumentation/{ => utilities}/concurrent_futures/__init__.py (100%) rename agentops/instrumentation/{ => utilities}/concurrent_futures/instrumentation.py (100%) diff --git a/agentops/instrumentation/__init__.py b/agentops/instrumentation/__init__.py index e902262df..54ae6ee5e 100644 --- a/agentops/instrumentation/__init__.py +++ b/agentops/instrumentation/__init__.py @@ -47,28 +47,28 @@ class InstrumentorConfig(TypedDict): # Configuration for supported LLM providers PROVIDERS: dict[str, InstrumentorConfig] = { "openai": { - "module_name": "agentops.instrumentation.openai", + "module_name": "agentops.instrumentation.providers.openai", "class_name": "OpenAIInstrumentor", "min_version": "1.0.0", }, "anthropic": { - "module_name": "agentops.instrumentation.anthropic", + "module_name": "agentops.instrumentation.providers.anthropic", "class_name": "AnthropicInstrumentor", "min_version": "0.32.0", }, "ibm_watsonx_ai": { - "module_name": "agentops.instrumentation.ibm_watsonx_ai", + "module_name": "agentops.instrumentation.providers.ibm_watsonx_ai", "class_name": "IBMWatsonXInstrumentor", "min_version": "0.1.0", }, "google.genai": { - "module_name": "agentops.instrumentation.google_genai", + "module_name": "agentops.instrumentation.providers.google_genai", "class_name": "GoogleGenAIInstrumentor", "min_version": "0.1.0", "package_name": "google-genai", # Actual pip package name }, "mem0": { - "module_name": "agentops.instrumentation.mem0", + "module_name": "agentops.instrumentation.providers.mem0", "class_name": "Mem0Instrumentor", "min_version": "0.1.0", "package_name": "mem0ai", @@ -78,7 +78,7 @@ class InstrumentorConfig(TypedDict): # Configuration for utility 
instrumentors UTILITY_INSTRUMENTORS: dict[str, InstrumentorConfig] = { "concurrent.futures": { - "module_name": "agentops.instrumentation.concurrent_futures", + "module_name": "agentops.instrumentation.utilities.concurrent_futures", "class_name": "ConcurrentFuturesInstrumentor", "min_version": "3.7.0", # Python 3.7+ (concurrent.futures is stdlib) "package_name": "python", # Special case for stdlib modules @@ -88,21 +88,35 @@ class InstrumentorConfig(TypedDict): # Configuration for supported agentic libraries AGENTIC_LIBRARIES: dict[str, InstrumentorConfig] = { "crewai": { - "module_name": "agentops.instrumentation.crewai", + "module_name": "agentops.instrumentation.agentic.crewai", "class_name": "CrewAIInstrumentor", "min_version": "0.56.0", }, - "autogen": {"module_name": "agentops.instrumentation.ag2", "class_name": "AG2Instrumentor", "min_version": "0.3.2"}, + "autogen": { + "module_name": "agentops.instrumentation.agentic.ag2", + "class_name": "AG2Instrumentor", + "min_version": "0.3.2", + }, "agents": { - "module_name": "agentops.instrumentation.openai_agents", + "module_name": "agentops.instrumentation.agentic.openai_agents", "class_name": "OpenAIAgentsInstrumentor", "min_version": "0.0.1", }, "google.adk": { - "module_name": "agentops.instrumentation.google_adk", + "module_name": "agentops.instrumentation.agentic.google_adk", "class_name": "GoogleADKInstrumentor", "min_version": "0.1.0", }, + "agno": { + "module_name": "agentops.instrumentation.agentic.agno", + "class_name": "AgnoInstrumentor", + "min_version": "0.1.0", + }, + "smolagents": { + "module_name": "agentops.instrumentation.agentic.smolagents", + "class_name": "SmolAgentsInstrumentor", + "min_version": "1.0.0", + }, } # Combine all target packages for monitoring diff --git a/agentops/instrumentation/ag2/__init__.py b/agentops/instrumentation/agentic/ag2/__init__.py similarity index 86% rename from agentops/instrumentation/ag2/__init__.py rename to agentops/instrumentation/agentic/ag2/__init__.py index ddfc1c8a9..226628545 100644 --- a/agentops/instrumentation/ag2/__init__.py +++ b/agentops/instrumentation/agentic/ag2/__init__.py @@ -13,6 +13,6 @@ LIBRARY_VERSION = _library_info.version # Import after defining constants to avoid circular imports -from agentops.instrumentation.ag2.instrumentor import AG2Instrumentor # noqa: E402 +from agentops.instrumentation.agentic.ag2.instrumentor import AG2Instrumentor # noqa: E402 __all__ = ["AG2Instrumentor", "LIBRARY_NAME", "LIBRARY_VERSION"] diff --git a/agentops/instrumentation/ag2/instrumentor.py b/agentops/instrumentation/agentic/ag2/instrumentor.py similarity index 99% rename from agentops/instrumentation/ag2/instrumentor.py rename to agentops/instrumentation/agentic/ag2/instrumentor.py index ff2cdd6fe..8a6558970 100644 --- a/agentops/instrumentation/ag2/instrumentor.py +++ b/agentops/instrumentation/agentic/ag2/instrumentor.py @@ -20,7 +20,7 @@ create_span, SpanAttributeManager, ) -from agentops.instrumentation.ag2 import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.agentic.ag2 import LIBRARY_NAME, LIBRARY_VERSION from agentops.semconv.message import MessageAttributes from agentops.semconv.span_attributes import SpanAttributes from agentops.semconv.agent import AgentAttributes diff --git a/agentops/instrumentation/agno/__init__.py b/agentops/instrumentation/agentic/agno/__init__.py similarity index 100% rename from agentops/instrumentation/agno/__init__.py rename to agentops/instrumentation/agentic/agno/__init__.py diff --git 
a/agentops/instrumentation/agno/attributes/__init__.py b/agentops/instrumentation/agentic/agno/attributes/__init__.py similarity index 100% rename from agentops/instrumentation/agno/attributes/__init__.py rename to agentops/instrumentation/agentic/agno/attributes/__init__.py diff --git a/agentops/instrumentation/agno/attributes/agent.py b/agentops/instrumentation/agentic/agno/attributes/agent.py similarity index 100% rename from agentops/instrumentation/agno/attributes/agent.py rename to agentops/instrumentation/agentic/agno/attributes/agent.py diff --git a/agentops/instrumentation/agno/attributes/metrics.py b/agentops/instrumentation/agentic/agno/attributes/metrics.py similarity index 100% rename from agentops/instrumentation/agno/attributes/metrics.py rename to agentops/instrumentation/agentic/agno/attributes/metrics.py diff --git a/agentops/instrumentation/agno/attributes/team.py b/agentops/instrumentation/agentic/agno/attributes/team.py similarity index 100% rename from agentops/instrumentation/agno/attributes/team.py rename to agentops/instrumentation/agentic/agno/attributes/team.py diff --git a/agentops/instrumentation/agno/attributes/tool.py b/agentops/instrumentation/agentic/agno/attributes/tool.py similarity index 100% rename from agentops/instrumentation/agno/attributes/tool.py rename to agentops/instrumentation/agentic/agno/attributes/tool.py diff --git a/agentops/instrumentation/agno/attributes/workflow.py b/agentops/instrumentation/agentic/agno/attributes/workflow.py similarity index 100% rename from agentops/instrumentation/agno/attributes/workflow.py rename to agentops/instrumentation/agentic/agno/attributes/workflow.py diff --git a/agentops/instrumentation/agno/instrumentor.py b/agentops/instrumentation/agentic/agno/instrumentor.py similarity index 97% rename from agentops/instrumentation/agno/instrumentor.py rename to agentops/instrumentation/agentic/agno/instrumentor.py index 2394d1189..3576a6bdb 100644 --- a/agentops/instrumentation/agno/instrumentor.py +++ b/agentops/instrumentation/agentic/agno/instrumentor.py @@ -17,24 +17,26 @@ parent-child span relationships. 
""" -from typing import List, Collection, Any, Optional +from typing import List, Any, Optional, Dict from opentelemetry import trace, context as otel_context from opentelemetry.trace import Status, StatusCode +from opentelemetry.metrics import Meter import threading from agentops.logging import logger from agentops.instrumentation.common import ( BaseAgentOpsInstrumentor, StandardMetrics, + InstrumentorConfig, ) from agentops.instrumentation.common.wrappers import WrapConfig # Import attribute handlers -from agentops.instrumentation.agno.attributes.agent import get_agent_run_attributes -from agentops.instrumentation.agno.attributes.team import get_team_run_attributes -from agentops.instrumentation.agno.attributes.tool import get_tool_execution_attributes -from agentops.instrumentation.agno.attributes.metrics import get_metrics_attributes -from agentops.instrumentation.agno.attributes.workflow import ( +from agentops.instrumentation.agentic.agno.attributes.agent import get_agent_run_attributes +from agentops.instrumentation.agentic.agno.attributes.team import get_team_run_attributes +from agentops.instrumentation.agentic.agno.attributes.tool import get_tool_execution_attributes +from agentops.instrumentation.agentic.agno.attributes.metrics import get_metrics_attributes +from agentops.instrumentation.agentic.agno.attributes.workflow import ( get_workflow_run_attributes, get_workflow_session_attributes, ) @@ -915,16 +917,17 @@ class AgnoInstrumentor(BaseAgentOpsInstrumentor): def __init__(self): """Initialize the Agno instrumentor.""" - super().__init__( - name="agno", - version="0.1.0", + # Create instrumentor config + config = InstrumentorConfig( library_name="agentops.instrumentation.agno", + library_version="0.1.0", + wrapped_methods=[], # We'll populate this in _get_wrapped_methods + metrics_enabled=True, + dependencies=["agno >= 0.1.0"], ) - self._streaming_context_manager = StreamingContextManager() - def instrumentation_dependencies(self) -> Collection[str]: - """Returns list of packages required for instrumentation.""" - return ["agno >= 0.1.0"] + super().__init__(config) + self._streaming_context_manager = StreamingContextManager() def _get_wrapped_methods(self) -> List[WrapConfig]: """Return list of methods to be wrapped.""" @@ -1022,25 +1025,22 @@ def _get_wrapped_methods(self) -> List[WrapConfig]: return wrapped_methods - def _instrument(self, **kwargs): - """Install instrumentation for Agno.""" - # Call parent implementation - super()._instrument(**kwargs) + def _create_metrics(self, meter: Meter) -> Dict[str, Any]: + """Create metrics for the instrumentor. + Returns a dictionary of metric name to metric instance. 
+ """ # Create standard metrics for LLM operations - self._metrics = StandardMetrics(self._meter) - self._metrics.create_llm_metrics(system_name="agno", operation_description="Agno agent operation") + return StandardMetrics.create_standard_metrics(meter) + def _initialize(self, **kwargs): + """Perform custom initialization.""" logger.info("Agno instrumentation installed successfully") - def _uninstrument(self, **kwargs): - """Remove instrumentation for Agno.""" + def _custom_unwrap(self, **kwargs): + """Perform custom unwrapping.""" # Clear streaming contexts self._streaming_context_manager.clear_all() - - # Call parent implementation - super()._uninstrument(**kwargs) - logger.info("Agno instrumentation removed successfully") # Method wrappers converted to instance methods diff --git a/agentops/instrumentation/crewai/LICENSE b/agentops/instrumentation/agentic/crewai/LICENSE similarity index 100% rename from agentops/instrumentation/crewai/LICENSE rename to agentops/instrumentation/agentic/crewai/LICENSE diff --git a/agentops/instrumentation/crewai/NOTICE.md b/agentops/instrumentation/agentic/crewai/NOTICE.md similarity index 100% rename from agentops/instrumentation/crewai/NOTICE.md rename to agentops/instrumentation/agentic/crewai/NOTICE.md diff --git a/agentops/instrumentation/agentic/crewai/__init__.py b/agentops/instrumentation/agentic/crewai/__init__.py new file mode 100644 index 000000000..692478d0e --- /dev/null +++ b/agentops/instrumentation/agentic/crewai/__init__.py @@ -0,0 +1,6 @@ +"""OpenTelemetry CrewAI instrumentation""" + +from agentops.instrumentation.agentic.crewai.version import __version__ +from agentops.instrumentation.agentic.crewai.instrumentation import CrewAIInstrumentor + +__all__ = ["CrewAIInstrumentor", "__version__"] diff --git a/agentops/instrumentation/crewai/crewai_span_attributes.py b/agentops/instrumentation/agentic/crewai/crewai_span_attributes.py similarity index 100% rename from agentops/instrumentation/crewai/crewai_span_attributes.py rename to agentops/instrumentation/agentic/crewai/crewai_span_attributes.py diff --git a/agentops/instrumentation/crewai/instrumentation.py b/agentops/instrumentation/agentic/crewai/instrumentation.py similarity index 99% rename from agentops/instrumentation/crewai/instrumentation.py rename to agentops/instrumentation/agentic/crewai/instrumentation.py index dd0fc69f8..0f2102d61 100644 --- a/agentops/instrumentation/crewai/instrumentation.py +++ b/agentops/instrumentation/agentic/crewai/instrumentation.py @@ -19,10 +19,10 @@ set_token_usage_attributes, TokenUsageExtractor, ) -from agentops.instrumentation.crewai.version import __version__ +from agentops.instrumentation.agentic.crewai.version import __version__ from agentops.semconv import SpanAttributes, AgentOpsSpanKindValues, ToolAttributes, MessageAttributes from agentops.semconv.core import CoreAttributes -from agentops.instrumentation.crewai.crewai_span_attributes import CrewAISpanAttributes, set_span_attribute +from agentops.instrumentation.agentic.crewai.crewai_span_attributes import CrewAISpanAttributes, set_span_attribute from agentops import get_client # Initialize logger diff --git a/agentops/instrumentation/crewai/version.py b/agentops/instrumentation/agentic/crewai/version.py similarity index 100% rename from agentops/instrumentation/crewai/version.py rename to agentops/instrumentation/agentic/crewai/version.py diff --git a/agentops/instrumentation/google_adk/__init__.py b/agentops/instrumentation/agentic/google_adk/__init__.py similarity index 76% rename from 
agentops/instrumentation/google_adk/__init__.py rename to agentops/instrumentation/agentic/google_adk/__init__.py index f2d9e9542..8dda30547 100644 --- a/agentops/instrumentation/google_adk/__init__.py +++ b/agentops/instrumentation/agentic/google_adk/__init__.py @@ -13,7 +13,7 @@ LIBRARY_NAME = _library_info.name LIBRARY_VERSION = _library_info.version -from agentops.instrumentation.google_adk.instrumentor import GoogleADKInstrumentor # noqa: E402 -from agentops.instrumentation.google_adk import patch # noqa: E402 +from agentops.instrumentation.agentic.google_adk.instrumentor import GoogleADKInstrumentor # noqa: E402 +from agentops.instrumentation.agentic.google_adk import patch # noqa: E402 __all__ = ["LIBRARY_NAME", "LIBRARY_VERSION", "GoogleADKInstrumentor", "patch"] diff --git a/agentops/instrumentation/agentic/google_adk/instrumentor.py b/agentops/instrumentation/agentic/google_adk/instrumentor.py new file mode 100644 index 000000000..a14e17d6a --- /dev/null +++ b/agentops/instrumentation/agentic/google_adk/instrumentor.py @@ -0,0 +1,68 @@ +"""Google ADK Instrumentation for AgentOps + +This module provides instrumentation for Google's Agent Development Kit (ADK). +It uses a patching approach to: +1. Disable ADK's built-in telemetry to prevent duplicate spans +2. Create AgentOps spans that mirror ADK's telemetry structure +3. Extract and properly index LLM messages and tool calls +""" + +from typing import Dict, Any + +from agentops.logging import logger +from opentelemetry.metrics import Meter +from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics, InstrumentorConfig +from agentops.instrumentation.agentic.google_adk.patch import patch_adk, unpatch_adk + +# Library info for tracer/meter +LIBRARY_NAME = "agentops.instrumentation.google_adk" +LIBRARY_VERSION = "0.1.0" + + +class GoogleADKInstrumentor(BaseAgentOpsInstrumentor): + """An instrumentor for Google Agent Development Kit (ADK). + + This instrumentor patches Google ADK to: + - Prevent ADK from creating its own telemetry spans + - Create AgentOps spans for agent runs, LLM calls, and tool calls + - Properly extract and index message content and tool interactions + """ + + def __init__(self): + """Initialize the Google ADK instrumentor.""" + # Create instrumentor config + config = InstrumentorConfig( + library_name=LIBRARY_NAME, + library_version=LIBRARY_VERSION, + wrapped_methods=[], # We use patching instead of wrapping + metrics_enabled=True, + dependencies=["google-adk >= 0.1.0"], + ) + + super().__init__(config) + + def _create_metrics(self, meter: Meter) -> Dict[str, Any]: + """Create metrics for the instrumentor. + + Returns a dictionary of metric name to metric instance. + """ + # Create standard metrics for LLM operations + return StandardMetrics.create_standard_metrics(meter) + + def _custom_wrap(self, **kwargs): + """Apply custom patching for Google ADK. + + This is called after normal wrapping, but we use it for patching + since we don't have normal wrapped methods. + """ + # Apply patches with our tracer + patch_adk(self._tracer) + logger.info("Google ADK instrumentation enabled") + + def _custom_unwrap(self, **kwargs): + """Remove custom patching from Google ADK. + + This method removes all patches and restores ADK's original behavior. 
+ """ + unpatch_adk() + logger.info("Google ADK instrumentation disabled") diff --git a/agentops/instrumentation/google_adk/patch.py b/agentops/instrumentation/agentic/google_adk/patch.py similarity index 100% rename from agentops/instrumentation/google_adk/patch.py rename to agentops/instrumentation/agentic/google_adk/patch.py diff --git a/agentops/instrumentation/openai_agents/README.md b/agentops/instrumentation/agentic/openai_agents/README.md similarity index 100% rename from agentops/instrumentation/openai_agents/README.md rename to agentops/instrumentation/agentic/openai_agents/README.md diff --git a/agentops/instrumentation/openai_agents/SPANS.md b/agentops/instrumentation/agentic/openai_agents/SPANS.md similarity index 100% rename from agentops/instrumentation/openai_agents/SPANS.md rename to agentops/instrumentation/agentic/openai_agents/SPANS.md diff --git a/agentops/instrumentation/openai_agents/TRACING_API.md b/agentops/instrumentation/agentic/openai_agents/TRACING_API.md similarity index 100% rename from agentops/instrumentation/openai_agents/TRACING_API.md rename to agentops/instrumentation/agentic/openai_agents/TRACING_API.md diff --git a/agentops/instrumentation/openai_agents/__init__.py b/agentops/instrumentation/agentic/openai_agents/__init__.py similarity index 90% rename from agentops/instrumentation/openai_agents/__init__.py rename to agentops/instrumentation/agentic/openai_agents/__init__.py index 9d513971c..531fa5b2a 100644 --- a/agentops/instrumentation/openai_agents/__init__.py +++ b/agentops/instrumentation/agentic/openai_agents/__init__.py @@ -21,7 +21,7 @@ LIBRARY_VERSION = _library_info.version # Import after defining constants to avoid circular imports -from agentops.instrumentation.openai_agents.instrumentor import OpenAIAgentsInstrumentor # noqa: E402 +from agentops.instrumentation.agentic.openai_agents.instrumentor import OpenAIAgentsInstrumentor # noqa: E402 __all__ = [ "LIBRARY_NAME", diff --git a/agentops/instrumentation/openai_agents/attributes/__init__.py b/agentops/instrumentation/agentic/openai_agents/attributes/__init__.py similarity index 100% rename from agentops/instrumentation/openai_agents/attributes/__init__.py rename to agentops/instrumentation/agentic/openai_agents/attributes/__init__.py diff --git a/agentops/instrumentation/openai_agents/attributes/common.py b/agentops/instrumentation/agentic/openai_agents/attributes/common.py similarity index 98% rename from agentops/instrumentation/openai_agents/attributes/common.py rename to agentops/instrumentation/agentic/openai_agents/attributes/common.py index be64fa3bf..18ebe8997 100644 --- a/agentops/instrumentation/openai_agents/attributes/common.py +++ b/agentops/instrumentation/agentic/openai_agents/attributes/common.py @@ -21,14 +21,14 @@ from agentops.instrumentation.common import AttributeMap, _extract_attributes_from_mapping from agentops.instrumentation.common.attributes import get_common_attributes from agentops.instrumentation.common.objects import get_uploaded_object_attributes -from agentops.instrumentation.openai.attributes.response import get_response_response_attributes -from agentops.instrumentation.openai_agents import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.providers.openai.attributes.response import get_response_response_attributes +from agentops.instrumentation.agentic.openai_agents import LIBRARY_NAME, LIBRARY_VERSION -from agentops.instrumentation.openai_agents.attributes.model import ( +from agentops.instrumentation.agentic.openai_agents.attributes.model 
import ( get_model_attributes, get_model_config_attributes, ) -from agentops.instrumentation.openai_agents.attributes.completion import get_generation_output_attributes +from agentops.instrumentation.agentic.openai_agents.attributes.completion import get_generation_output_attributes # Attribute mapping for AgentSpanData diff --git a/agentops/instrumentation/openai_agents/attributes/completion.py b/agentops/instrumentation/agentic/openai_agents/attributes/completion.py similarity index 98% rename from agentops/instrumentation/openai_agents/attributes/completion.py rename to agentops/instrumentation/agentic/openai_agents/attributes/completion.py index d035d6cff..1722109df 100644 --- a/agentops/instrumentation/openai_agents/attributes/completion.py +++ b/agentops/instrumentation/agentic/openai_agents/attributes/completion.py @@ -14,7 +14,7 @@ SpanAttributes, MessageAttributes, ) -from agentops.instrumentation.openai_agents.attributes.tokens import process_token_usage +from agentops.instrumentation.agentic.openai_agents.attributes.tokens import process_token_usage def get_generation_output_attributes(output: Any) -> Dict[str, Any]: diff --git a/agentops/instrumentation/openai_agents/attributes/model.py b/agentops/instrumentation/agentic/openai_agents/attributes/model.py similarity index 100% rename from agentops/instrumentation/openai_agents/attributes/model.py rename to agentops/instrumentation/agentic/openai_agents/attributes/model.py diff --git a/agentops/instrumentation/openai_agents/attributes/tokens.py b/agentops/instrumentation/agentic/openai_agents/attributes/tokens.py similarity index 100% rename from agentops/instrumentation/openai_agents/attributes/tokens.py rename to agentops/instrumentation/agentic/openai_agents/attributes/tokens.py diff --git a/agentops/instrumentation/openai_agents/exporter.py b/agentops/instrumentation/agentic/openai_agents/exporter.py similarity index 99% rename from agentops/instrumentation/openai_agents/exporter.py rename to agentops/instrumentation/agentic/openai_agents/exporter.py index 6e6734971..1fc5b345a 100644 --- a/agentops/instrumentation/openai_agents/exporter.py +++ b/agentops/instrumentation/agentic/openai_agents/exporter.py @@ -33,8 +33,8 @@ get_base_span_attributes, ) -from agentops.instrumentation.openai_agents import LIBRARY_NAME, LIBRARY_VERSION -from agentops.instrumentation.openai_agents.attributes.common import ( +from agentops.instrumentation.agentic.openai_agents import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.agentic.openai_agents.attributes.common import ( get_span_attributes, ) diff --git a/agentops/instrumentation/openai_agents/instrumentor.py b/agentops/instrumentation/agentic/openai_agents/instrumentor.py similarity index 94% rename from agentops/instrumentation/openai_agents/instrumentor.py rename to agentops/instrumentation/agentic/openai_agents/instrumentor.py index 2b92e2d79..00ccded2c 100644 --- a/agentops/instrumentation/openai_agents/instrumentor.py +++ b/agentops/instrumentation/agentic/openai_agents/instrumentor.py @@ -25,11 +25,11 @@ from opentelemetry import trace from opentelemetry.instrumentation.instrumentor import BaseInstrumentor # type: ignore -from agentops.instrumentation.openai_agents import LIBRARY_VERSION +from agentops.instrumentation.agentic.openai_agents import LIBRARY_VERSION from agentops.logging import logger -from agentops.instrumentation.openai_agents.processor import OpenAIAgentsProcessor -from agentops.instrumentation.openai_agents.exporter import OpenAIAgentsExporter +from 
agentops.instrumentation.agentic.openai_agents.processor import OpenAIAgentsProcessor +from agentops.instrumentation.agentic.openai_agents.exporter import OpenAIAgentsExporter class OpenAIAgentsInstrumentor(BaseInstrumentor): diff --git a/agentops/instrumentation/openai_agents/processor.py b/agentops/instrumentation/agentic/openai_agents/processor.py similarity index 100% rename from agentops/instrumentation/openai_agents/processor.py rename to agentops/instrumentation/agentic/openai_agents/processor.py diff --git a/agentops/instrumentation/smolagents/README.md b/agentops/instrumentation/agentic/smolagents/README.md similarity index 100% rename from agentops/instrumentation/smolagents/README.md rename to agentops/instrumentation/agentic/smolagents/README.md diff --git a/agentops/instrumentation/smolagents/__init__.py b/agentops/instrumentation/agentic/smolagents/__init__.py similarity index 74% rename from agentops/instrumentation/smolagents/__init__.py rename to agentops/instrumentation/agentic/smolagents/__init__.py index 31bab7d70..7a8ede8df 100644 --- a/agentops/instrumentation/smolagents/__init__.py +++ b/agentops/instrumentation/agentic/smolagents/__init__.py @@ -7,6 +7,6 @@ LIBRARY_NAME = _library_info.name LIBRARY_VERSION = _library_info.version -from agentops.instrumentation.smolagents.instrumentor import SmolAgentsInstrumentor # noqa: E402 +from agentops.instrumentation.agentic.smolagents.instrumentor import SmolAgentsInstrumentor # noqa: E402 __all__ = ["SmolAgentsInstrumentor"] diff --git a/agentops/instrumentation/smolagents/attributes/agent.py b/agentops/instrumentation/agentic/smolagents/attributes/agent.py similarity index 100% rename from agentops/instrumentation/smolagents/attributes/agent.py rename to agentops/instrumentation/agentic/smolagents/attributes/agent.py diff --git a/agentops/instrumentation/smolagents/attributes/model.py b/agentops/instrumentation/agentic/smolagents/attributes/model.py similarity index 100% rename from agentops/instrumentation/smolagents/attributes/model.py rename to agentops/instrumentation/agentic/smolagents/attributes/model.py diff --git a/agentops/instrumentation/smolagents/instrumentor.py b/agentops/instrumentation/agentic/smolagents/instrumentor.py similarity index 83% rename from agentops/instrumentation/smolagents/instrumentor.py rename to agentops/instrumentation/agentic/smolagents/instrumentor.py index 37b6b6877..d19323b72 100644 --- a/agentops/instrumentation/smolagents/instrumentor.py +++ b/agentops/instrumentation/agentic/smolagents/instrumentor.py @@ -1,11 +1,11 @@ """SmoLAgents instrumentation for AgentOps.""" -from typing import Collection, List +from typing import Dict, Any from opentelemetry.trace import SpanKind +from opentelemetry.metrics import Meter from wrapt import wrap_function_wrapper -from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics -from agentops.instrumentation.common.wrappers import WrapConfig +from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics, InstrumentorConfig from agentops.logging import logger # Library info for tracer/meter @@ -14,7 +14,7 @@ # Import attribute handlers try: - from agentops.instrumentation.smolagents.attributes.agent import ( + from agentops.instrumentation.agentic.smolagents.attributes.agent import ( get_agent_attributes, get_tool_call_attributes, get_planning_step_attributes, @@ -22,7 +22,7 @@ get_agent_stream_attributes, get_managed_agent_attributes, ) - from agentops.instrumentation.smolagents.attributes.model import 
( + from agentops.instrumentation.agentic.smolagents.attributes.model import ( get_model_attributes, get_stream_attributes, ) @@ -58,54 +58,43 @@ class SmolAgentsInstrumentor(BaseAgentOpsInstrumentor): def __init__(self): """Initialize the SmoLAgents instrumentor.""" - super().__init__( - name="smolagents", - version=LIBRARY_VERSION, + # Create instrumentor config + config = InstrumentorConfig( library_name=LIBRARY_NAME, + library_version=LIBRARY_VERSION, + wrapped_methods=[], # We use custom wrapping + metrics_enabled=True, + dependencies=["smolagents >= 1.0.0", "litellm"], ) - def instrumentation_dependencies(self) -> Collection[str]: - return ( - "smolagents >= 1.0.0", - "litellm", - ) + super().__init__(config) - def _get_wrapped_methods(self) -> List[WrapConfig]: - """Return list of methods to be wrapped. + def _create_metrics(self, meter: Meter) -> Dict[str, Any]: + """Create metrics for the instrumentor. - For SmoLAgents, we don't use the standard wrapping mechanism - since we need custom wrappers with special logic. + Returns a dictionary of metric name to metric instance. """ - return [] - - def _instrument(self, **kwargs): - """Instrument SmoLAgents with AgentOps telemetry.""" - # Note: We don't call super()._instrument() here because we're not using - # the standard wrapping mechanism for this special instrumentor - - # Get tracer from base class - self._tracer_provider = kwargs.get("tracer_provider") - self._meter_provider = kwargs.get("meter_provider") + # Create standard metrics for LLM operations + return StandardMetrics.create_standard_metrics(meter) - # Initialize tracer and meter (these are set by base class properties) - tracer = self._tracer - - # Create standard metrics - self._metrics = StandardMetrics(self._meter) - self._metrics.create_llm_metrics(system_name="SmoLAgents", operation_description="SmoLAgents operation") + def _custom_wrap(self, **kwargs): + """Apply custom wrapping for SmoLAgents. + This is called after normal wrapping, but we use it for all wrapping + since we don't have normal wrapped methods. 
+ """ # Core agent operations - wrap_function_wrapper("smolagents.agents", "CodeAgent.run", self._agent_run_wrapper(tracer)) - wrap_function_wrapper("smolagents.agents", "ToolCallingAgent.run", self._agent_run_wrapper(tracer)) + wrap_function_wrapper("smolagents.agents", "CodeAgent.run", self._agent_run_wrapper(self._tracer)) + wrap_function_wrapper("smolagents.agents", "ToolCallingAgent.run", self._agent_run_wrapper(self._tracer)) # Tool calling operations wrap_function_wrapper( - "smolagents.agents", "ToolCallingAgent.execute_tool_call", self._tool_execution_wrapper(tracer) + "smolagents.agents", "ToolCallingAgent.execute_tool_call", self._tool_execution_wrapper(self._tracer) ) # Model operations with proper model name extraction - wrap_function_wrapper("smolagents.models", "LiteLLMModel.generate", self._llm_wrapper(tracer)) - wrap_function_wrapper("smolagents.models", "LiteLLMModel.generate_stream", self._llm_wrapper(tracer)) + wrap_function_wrapper("smolagents.models", "LiteLLMModel.generate", self._llm_wrapper(self._tracer)) + wrap_function_wrapper("smolagents.models", "LiteLLMModel.generate_stream", self._llm_wrapper(self._tracer)) logger.info("SmoLAgents instrumentation enabled") @@ -261,11 +250,11 @@ def wrapper(wrapped, instance, args, kwargs): return wrapper - def _uninstrument(self, **kwargs): - """Remove instrumentation.""" - # Note: We don't call super()._uninstrument() here because we're not using - # the standard wrapping mechanism for this special instrumentor + def _custom_unwrap(self, **kwargs): + """Remove custom wrapping from SmoLAgents. + This method removes all custom wrapping we applied. + """ # Unwrap all instrumented methods from opentelemetry.instrumentation.utils import unwrap diff --git a/agentops/instrumentation/smolagents/stream_wrapper.py b/agentops/instrumentation/agentic/smolagents/stream_wrapper.py similarity index 100% rename from agentops/instrumentation/smolagents/stream_wrapper.py rename to agentops/instrumentation/agentic/smolagents/stream_wrapper.py diff --git a/agentops/instrumentation/crewai/__init__.py b/agentops/instrumentation/crewai/__init__.py deleted file mode 100644 index a5f1a5a99..000000000 --- a/agentops/instrumentation/crewai/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -"""OpenTelemetry CrewAI instrumentation""" - -from agentops.instrumentation.crewai.version import __version__ -from agentops.instrumentation.crewai.instrumentation import CrewAIInstrumentor - -__all__ = ["CrewAIInstrumentor", "__version__"] diff --git a/agentops/instrumentation/google_adk/instrumentor.py b/agentops/instrumentation/google_adk/instrumentor.py deleted file mode 100644 index 53f81f76b..000000000 --- a/agentops/instrumentation/google_adk/instrumentor.py +++ /dev/null @@ -1,88 +0,0 @@ -"""Google ADK Instrumentation for AgentOps - -This module provides instrumentation for Google's Agent Development Kit (ADK). -It uses a patching approach to: -1. Disable ADK's built-in telemetry to prevent duplicate spans -2. Create AgentOps spans that mirror ADK's telemetry structure -3. 
Extract and properly index LLM messages and tool calls -""" - -from typing import Collection, List - -from agentops.logging import logger -from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics -from agentops.instrumentation.common.wrappers import WrapConfig -from agentops.instrumentation.google_adk.patch import patch_adk, unpatch_adk - -# Library info for tracer/meter -LIBRARY_NAME = "agentops.instrumentation.google_adk" -LIBRARY_VERSION = "0.1.0" - - -class GoogleADKInstrumentor(BaseAgentOpsInstrumentor): - """An instrumentor for Google Agent Development Kit (ADK). - - This instrumentor patches Google ADK to: - - Prevent ADK from creating its own telemetry spans - - Create AgentOps spans for agent runs, LLM calls, and tool calls - - Properly extract and index message content and tool interactions - """ - - def __init__(self): - """Initialize the Google ADK instrumentor.""" - super().__init__( - name="google_adk", - version=LIBRARY_VERSION, - library_name=LIBRARY_NAME, - ) - - def instrumentation_dependencies(self) -> Collection[str]: - """Return packages required for instrumentation.""" - return ["google-adk >= 0.1.0"] - - def _get_wrapped_methods(self) -> List[WrapConfig]: - """ - Return list of methods to be wrapped. - - For Google ADK, we don't use the standard wrapping mechanism - since we're using a patching approach instead. - """ - return [] - - def _instrument(self, **kwargs): - """Instrument the Google ADK. - - This method: - 1. Disables ADK's built-in telemetry - 2. Patches key ADK methods to create AgentOps spans - 3. Sets up metrics for tracking token usage and operation duration - """ - # Note: We don't call super()._instrument() here because we're not using - # the standard wrapping mechanism for this special instrumentor - - # Get tracer and meter from base class - self._tracer_provider = kwargs.get("tracer_provider") - self._meter_provider = kwargs.get("meter_provider") - - # Initialize tracer and meter (these are set by base class properties) - _ = self._tracer - _ = self._meter - - # Create standard metrics for LLM operations - self._metrics = StandardMetrics(self._meter) - self._metrics.create_llm_metrics(system_name="Google ADK", operation_description="Google ADK operation") - - # Apply patches with our tracer - patch_adk(self._tracer) - logger.info("Google ADK instrumentation enabled") - - def _uninstrument(self, **kwargs): - """Remove instrumentation from Google ADK. - - This method removes all patches and restores ADK's original behavior. 
- """ - # Note: We don't call super()._uninstrument() here because we're not using - # the standard wrapping mechanism for this special instrumentor - - unpatch_adk() - logger.info("Google ADK instrumentation disabled") diff --git a/agentops/instrumentation/anthropic/__init__.py b/agentops/instrumentation/providers/anthropic/__init__.py similarity index 84% rename from agentops/instrumentation/anthropic/__init__.py rename to agentops/instrumentation/providers/anthropic/__init__.py index 47b4b758a..1045b718f 100644 --- a/agentops/instrumentation/anthropic/__init__.py +++ b/agentops/instrumentation/providers/anthropic/__init__.py @@ -15,7 +15,7 @@ LIBRARY_VERSION = _library_info.version # Import after defining constants to avoid circular imports -from agentops.instrumentation.anthropic.instrumentor import AnthropicInstrumentor # noqa: E402 +from agentops.instrumentation.providers.anthropic.instrumentor import AnthropicInstrumentor # noqa: E402 __all__ = [ "LIBRARY_NAME", diff --git a/agentops/instrumentation/anthropic/attributes/__init__.py b/agentops/instrumentation/providers/anthropic/attributes/__init__.py similarity index 55% rename from agentops/instrumentation/anthropic/attributes/__init__.py rename to agentops/instrumentation/providers/anthropic/attributes/__init__.py index cd72cf8ad..40a922bcf 100644 --- a/agentops/instrumentation/anthropic/attributes/__init__.py +++ b/agentops/instrumentation/providers/anthropic/attributes/__init__.py @@ -1,8 +1,11 @@ """Attribute extraction for Anthropic API instrumentation.""" -from agentops.instrumentation.anthropic.attributes.common import get_common_instrumentation_attributes -from agentops.instrumentation.anthropic.attributes.message import get_message_attributes, get_completion_attributes -from agentops.instrumentation.anthropic.attributes.tools import ( +from agentops.instrumentation.providers.anthropic.attributes.common import get_common_instrumentation_attributes +from agentops.instrumentation.providers.anthropic.attributes.message import ( + get_message_attributes, + get_completion_attributes, +) +from agentops.instrumentation.providers.anthropic.attributes.tools import ( extract_tool_definitions, extract_tool_use_blocks, extract_tool_results, diff --git a/agentops/instrumentation/anthropic/attributes/common.py b/agentops/instrumentation/providers/anthropic/attributes/common.py similarity index 95% rename from agentops/instrumentation/anthropic/attributes/common.py rename to agentops/instrumentation/providers/anthropic/attributes/common.py index b10063e5a..3d5ad647e 100644 --- a/agentops/instrumentation/anthropic/attributes/common.py +++ b/agentops/instrumentation/providers/anthropic/attributes/common.py @@ -4,7 +4,7 @@ from agentops.semconv import InstrumentationAttributes, SpanAttributes from agentops.instrumentation.common.attributes import AttributeMap, get_common_attributes -from agentops.instrumentation.anthropic import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.providers.anthropic import LIBRARY_NAME, LIBRARY_VERSION def get_common_instrumentation_attributes() -> AttributeMap: diff --git a/agentops/instrumentation/anthropic/attributes/message.py b/agentops/instrumentation/providers/anthropic/attributes/message.py similarity index 99% rename from agentops/instrumentation/anthropic/attributes/message.py rename to agentops/instrumentation/providers/anthropic/attributes/message.py index c99885b02..4346a2673 100644 --- a/agentops/instrumentation/anthropic/attributes/message.py +++ 
b/agentops/instrumentation/providers/anthropic/attributes/message.py @@ -16,11 +16,11 @@ MessageAttributes, ) from agentops.instrumentation.common.attributes import AttributeMap -from agentops.instrumentation.anthropic.attributes.common import ( +from agentops.instrumentation.providers.anthropic.attributes.common import ( get_common_instrumentation_attributes, extract_request_attributes, ) -from agentops.instrumentation.anthropic.attributes.tools import ( +from agentops.instrumentation.providers.anthropic.attributes.tools import ( extract_tool_definitions, get_tool_attributes, ) diff --git a/agentops/instrumentation/anthropic/attributes/tools.py b/agentops/instrumentation/providers/anthropic/attributes/tools.py similarity index 100% rename from agentops/instrumentation/anthropic/attributes/tools.py rename to agentops/instrumentation/providers/anthropic/attributes/tools.py diff --git a/agentops/instrumentation/anthropic/event_handler_wrapper.py b/agentops/instrumentation/providers/anthropic/event_handler_wrapper.py similarity index 100% rename from agentops/instrumentation/anthropic/event_handler_wrapper.py rename to agentops/instrumentation/providers/anthropic/event_handler_wrapper.py diff --git a/agentops/instrumentation/anthropic/instrumentor.py b/agentops/instrumentation/providers/anthropic/instrumentor.py similarity index 94% rename from agentops/instrumentation/anthropic/instrumentor.py rename to agentops/instrumentation/providers/anthropic/instrumentor.py index dca75564e..75ed1b4bb 100644 --- a/agentops/instrumentation/anthropic/instrumentor.py +++ b/agentops/instrumentation/providers/anthropic/instrumentor.py @@ -33,9 +33,12 @@ from agentops.logging import logger from agentops.instrumentation.common import BaseAgentOpsInstrumentor, InstrumentorConfig, WrapConfig, StandardMetrics -from agentops.instrumentation.anthropic import LIBRARY_NAME, LIBRARY_VERSION -from agentops.instrumentation.anthropic.attributes.message import get_message_attributes, get_completion_attributes -from agentops.instrumentation.anthropic.stream_wrapper import ( +from agentops.instrumentation.providers.anthropic import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.providers.anthropic.attributes.message import ( + get_message_attributes, + get_completion_attributes, +) +from agentops.instrumentation.providers.anthropic.stream_wrapper import ( messages_stream_wrapper, messages_stream_async_wrapper, ) diff --git a/agentops/instrumentation/anthropic/stream_wrapper.py b/agentops/instrumentation/providers/anthropic/stream_wrapper.py similarity index 99% rename from agentops/instrumentation/anthropic/stream_wrapper.py rename to agentops/instrumentation/providers/anthropic/stream_wrapper.py index 6603193e1..c42084c19 100644 --- a/agentops/instrumentation/anthropic/stream_wrapper.py +++ b/agentops/instrumentation/providers/anthropic/stream_wrapper.py @@ -14,11 +14,11 @@ from agentops.semconv import SpanAttributes, LLMRequestTypeValues, CoreAttributes, MessageAttributes from agentops.instrumentation.common.wrappers import _with_tracer_wrapper -from agentops.instrumentation.anthropic.attributes.message import ( +from agentops.instrumentation.providers.anthropic.attributes.message import ( get_message_request_attributes, get_stream_attributes, ) -from agentops.instrumentation.anthropic.event_handler_wrapper import EventHandleWrapper +from agentops.instrumentation.providers.anthropic.event_handler_wrapper import EventHandleWrapper logger = logging.getLogger(__name__) diff --git 
a/agentops/instrumentation/google_genai/README.md b/agentops/instrumentation/providers/google_genai/README.md similarity index 100% rename from agentops/instrumentation/google_genai/README.md rename to agentops/instrumentation/providers/google_genai/README.md diff --git a/agentops/instrumentation/google_genai/__init__.py b/agentops/instrumentation/providers/google_genai/__init__.py similarity index 84% rename from agentops/instrumentation/google_genai/__init__.py rename to agentops/instrumentation/providers/google_genai/__init__.py index 395811d34..0b82d64b1 100644 --- a/agentops/instrumentation/google_genai/__init__.py +++ b/agentops/instrumentation/providers/google_genai/__init__.py @@ -15,7 +15,7 @@ LIBRARY_VERSION = _library_info.version # Import after defining constants to avoid circular imports -from agentops.instrumentation.google_genai.instrumentor import GoogleGenAIInstrumentor # noqa: E402 +from agentops.instrumentation.providers.google_genai.instrumentor import GoogleGenAIInstrumentor # noqa: E402 __all__ = [ "LIBRARY_NAME", diff --git a/agentops/instrumentation/google_genai/attributes/__init__.py b/agentops/instrumentation/providers/google_genai/attributes/__init__.py similarity index 70% rename from agentops/instrumentation/google_genai/attributes/__init__.py rename to agentops/instrumentation/providers/google_genai/attributes/__init__.py index 94407d6cb..72c15392c 100644 --- a/agentops/instrumentation/google_genai/attributes/__init__.py +++ b/agentops/instrumentation/providers/google_genai/attributes/__init__.py @@ -1,16 +1,16 @@ """Attribute extractors for Google Generative AI instrumentation.""" -from agentops.instrumentation.google_genai.attributes.common import ( +from agentops.instrumentation.providers.google_genai.attributes.common import ( get_common_instrumentation_attributes, extract_request_attributes, ) -from agentops.instrumentation.google_genai.attributes.model import ( +from agentops.instrumentation.providers.google_genai.attributes.model import ( get_model_attributes, get_generate_content_attributes, get_stream_attributes, get_token_counting_attributes, ) -from agentops.instrumentation.google_genai.attributes.chat import ( +from agentops.instrumentation.providers.google_genai.attributes.chat import ( get_chat_attributes, ) diff --git a/agentops/instrumentation/google_genai/attributes/chat.py b/agentops/instrumentation/providers/google_genai/attributes/chat.py similarity index 96% rename from agentops/instrumentation/google_genai/attributes/chat.py rename to agentops/instrumentation/providers/google_genai/attributes/chat.py index 7b9c3a8ac..7bd4de998 100644 --- a/agentops/instrumentation/google_genai/attributes/chat.py +++ b/agentops/instrumentation/providers/google_genai/attributes/chat.py @@ -5,11 +5,11 @@ from agentops.logging import logger from agentops.semconv import SpanAttributes, LLMRequestTypeValues, MessageAttributes from agentops.instrumentation.common.attributes import AttributeMap -from agentops.instrumentation.google_genai.attributes.common import ( +from agentops.instrumentation.providers.google_genai.attributes.common import ( extract_request_attributes, get_common_instrumentation_attributes, ) -from agentops.instrumentation.google_genai.attributes.model import ( +from agentops.instrumentation.providers.google_genai.attributes.model import ( _extract_content_from_prompt, _set_response_attributes, ) diff --git a/agentops/instrumentation/google_genai/attributes/common.py b/agentops/instrumentation/providers/google_genai/attributes/common.py 
similarity index 97% rename from agentops/instrumentation/google_genai/attributes/common.py rename to agentops/instrumentation/providers/google_genai/attributes/common.py index da158d291..77d69d908 100644 --- a/agentops/instrumentation/google_genai/attributes/common.py +++ b/agentops/instrumentation/providers/google_genai/attributes/common.py @@ -9,7 +9,7 @@ get_common_attributes, _extract_attributes_from_mapping, ) -from agentops.instrumentation.google_genai import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.providers.google_genai import LIBRARY_NAME, LIBRARY_VERSION # Common mapping for config parameters REQUEST_CONFIG_ATTRIBUTES: AttributeMap = { diff --git a/agentops/instrumentation/google_genai/attributes/model.py b/agentops/instrumentation/providers/google_genai/attributes/model.py similarity index 99% rename from agentops/instrumentation/google_genai/attributes/model.py rename to agentops/instrumentation/providers/google_genai/attributes/model.py index 022a4fbac..414fd439b 100644 --- a/agentops/instrumentation/google_genai/attributes/model.py +++ b/agentops/instrumentation/providers/google_genai/attributes/model.py @@ -5,7 +5,7 @@ from agentops.logging import logger from agentops.semconv import SpanAttributes, LLMRequestTypeValues, MessageAttributes from agentops.instrumentation.common.attributes import AttributeMap -from agentops.instrumentation.google_genai.attributes.common import ( +from agentops.instrumentation.providers.google_genai.attributes.common import ( extract_request_attributes, get_common_instrumentation_attributes, ) diff --git a/agentops/instrumentation/google_genai/instrumentor.py b/agentops/instrumentation/providers/google_genai/instrumentor.py similarity index 74% rename from agentops/instrumentation/google_genai/instrumentor.py rename to agentops/instrumentation/providers/google_genai/instrumentor.py index 6c242c621..282729b98 100644 --- a/agentops/instrumentation/google_genai/instrumentor.py +++ b/agentops/instrumentation/providers/google_genai/instrumentor.py @@ -8,17 +8,18 @@ - Streaming responses - Special handling for streaming responses """ -from typing import List, Collection +from typing import List, Dict, Any from wrapt import wrap_function_wrapper +from opentelemetry.metrics import Meter from agentops.logging import logger -from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics +from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics, InstrumentorConfig from agentops.instrumentation.common.wrappers import WrapConfig -from agentops.instrumentation.google_genai.attributes.model import ( +from agentops.instrumentation.providers.google_genai.attributes.model import ( get_generate_content_attributes, get_token_counting_attributes, ) -from agentops.instrumentation.google_genai.stream_wrapper import ( +from agentops.instrumentation.providers.google_genai.stream_wrapper import ( generate_content_stream_wrapper, generate_content_stream_async_wrapper, ) @@ -108,43 +109,32 @@ class GoogleGenAIInstrumentor(BaseAgentOpsInstrumentor): def __init__(self): """Initialize the Google GenAI instrumentor.""" - super().__init__( - name="google_genai", - version=LIBRARY_VERSION, + config = InstrumentorConfig( library_name=LIBRARY_NAME, + library_version=LIBRARY_VERSION, + wrapped_methods=WRAPPED_METHODS, + metrics_enabled=True, + dependencies=["google-genai >= 0.1.0"], ) + super().__init__(config) - def instrumentation_dependencies(self) -> Collection[str]: - """Return packages required for 
instrumentation. + def _create_metrics(self, meter: Meter) -> Dict[str, Any]: + """Create standard LLM metrics for Google GenAI operations. + + Args: + meter: The OpenTelemetry meter to use for creating metrics. Returns: - A collection of package specifications required for this instrumentation. + Dictionary containing the created metrics. """ - return ["google-genai >= 0.1.0"] - - def _get_wrapped_methods(self) -> List[WrapConfig]: - """Return list of methods to be wrapped.""" - return WRAPPED_METHODS + return StandardMetrics.create_standard_metrics(meter) - def _instrument(self, **kwargs): - """Instrument the Google Generative AI API. - - This method wraps the key methods in the Google Generative AI client to capture - telemetry data for API calls. It sets up tracers, meters, and wraps the appropriate - methods for instrumentation. + def _custom_wrap(self, **kwargs): + """Perform custom wrapping for streaming methods. Args: **kwargs: Configuration options for instrumentation. """ - # Call parent implementation to handle standard method wrapping - super()._instrument(**kwargs) - - # Create standard metrics for LLM operations - self._metrics = StandardMetrics(self._meter) - self._metrics.create_llm_metrics( - system_name="Google Generative AI", operation_description="Google Generative AI operation" - ) - # Special handling for streaming responses for stream_method in STREAMING_METHODS: try: @@ -161,18 +151,12 @@ def _instrument(self, **kwargs): logger.info("Google Generative AI instrumentation enabled") - def _uninstrument(self, **kwargs): - """Remove instrumentation from Google Generative AI API. - - This method unwraps all methods that were wrapped during instrumentation, - restoring the original behavior of the Google Generative AI API. + def _custom_unwrap(self, **kwargs): + """Remove custom wrapping for streaming methods. Args: **kwargs: Configuration options for uninstrumentation. 
""" - # Call parent implementation to handle standard method unwrapping - super()._uninstrument(**kwargs) - # Unwrap streaming methods from opentelemetry.instrumentation.utils import unwrap as otel_unwrap diff --git a/agentops/instrumentation/google_genai/stream_wrapper.py b/agentops/instrumentation/providers/google_genai/stream_wrapper.py similarity index 98% rename from agentops/instrumentation/google_genai/stream_wrapper.py rename to agentops/instrumentation/providers/google_genai/stream_wrapper.py index 9b61cee62..04e93878d 100644 --- a/agentops/instrumentation/google_genai/stream_wrapper.py +++ b/agentops/instrumentation/providers/google_genai/stream_wrapper.py @@ -14,11 +14,11 @@ from agentops.semconv import SpanAttributes, LLMRequestTypeValues, CoreAttributes, MessageAttributes from agentops.instrumentation.common.wrappers import _with_tracer_wrapper -from agentops.instrumentation.google_genai.attributes.model import ( +from agentops.instrumentation.providers.google_genai.attributes.model import ( get_generate_content_attributes, get_stream_attributes, ) -from agentops.instrumentation.google_genai.attributes.common import ( +from agentops.instrumentation.providers.google_genai.attributes.common import ( extract_request_attributes, ) diff --git a/agentops/instrumentation/ibm_watsonx_ai/__init__.py b/agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py similarity index 87% rename from agentops/instrumentation/ibm_watsonx_ai/__init__.py rename to agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py index 56eeb9def..e590ebff9 100644 --- a/agentops/instrumentation/ibm_watsonx_ai/__init__.py +++ b/agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py @@ -19,7 +19,7 @@ LIBRARY_VERSION = _library_info.version # Import after defining constants to avoid circular imports -from agentops.instrumentation.ibm_watsonx_ai.instrumentor import IBMWatsonXInstrumentor # noqa: E402 +from agentops.instrumentation.providers.ibm_watsonx_ai.instrumentor import IBMWatsonXInstrumentor # noqa: E402 __all__ = [ "LIBRARY_NAME", diff --git a/agentops/instrumentation/ibm_watsonx_ai/attributes/__init__.py b/agentops/instrumentation/providers/ibm_watsonx_ai/attributes/__init__.py similarity index 79% rename from agentops/instrumentation/ibm_watsonx_ai/attributes/__init__.py rename to agentops/instrumentation/providers/ibm_watsonx_ai/attributes/__init__.py index bd3c42928..874d6a4e4 100644 --- a/agentops/instrumentation/ibm_watsonx_ai/attributes/__init__.py +++ b/agentops/instrumentation/providers/ibm_watsonx_ai/attributes/__init__.py @@ -1,12 +1,12 @@ """Attribute extraction utilities for IBM watsonx.ai instrumentation.""" -from agentops.instrumentation.ibm_watsonx_ai.attributes.attributes import ( +from agentops.instrumentation.providers.ibm_watsonx_ai.attributes.attributes import ( get_generate_attributes, get_chat_attributes, get_tokenize_attributes, get_model_details_attributes, ) -from agentops.instrumentation.ibm_watsonx_ai.attributes.common import ( +from agentops.instrumentation.providers.ibm_watsonx_ai.attributes.common import ( extract_params_attributes, convert_params_to_dict, extract_prompt_from_args, diff --git a/agentops/instrumentation/ibm_watsonx_ai/attributes/attributes.py b/agentops/instrumentation/providers/ibm_watsonx_ai/attributes/attributes.py similarity index 99% rename from agentops/instrumentation/ibm_watsonx_ai/attributes/attributes.py rename to agentops/instrumentation/providers/ibm_watsonx_ai/attributes/attributes.py index c9db673e4..3f50cb568 100644 --- 
a/agentops/instrumentation/ibm_watsonx_ai/attributes/attributes.py +++ b/agentops/instrumentation/providers/ibm_watsonx_ai/attributes/attributes.py @@ -6,7 +6,7 @@ from typing import Any, Dict, Optional, Tuple from agentops.instrumentation.common.attributes import AttributeMap from agentops.semconv import SpanAttributes, MessageAttributes -from agentops.instrumentation.ibm_watsonx_ai.attributes.common import ( +from agentops.instrumentation.providers.ibm_watsonx_ai.attributes.common import ( extract_params_attributes, convert_params_to_dict, extract_prompt_from_args, diff --git a/agentops/instrumentation/ibm_watsonx_ai/attributes/common.py b/agentops/instrumentation/providers/ibm_watsonx_ai/attributes/common.py similarity index 100% rename from agentops/instrumentation/ibm_watsonx_ai/attributes/common.py rename to agentops/instrumentation/providers/ibm_watsonx_ai/attributes/common.py diff --git a/agentops/instrumentation/ibm_watsonx_ai/instrumentor.py b/agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py similarity index 77% rename from agentops/instrumentation/ibm_watsonx_ai/instrumentor.py rename to agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py index 814af6235..dd5ec27d4 100644 --- a/agentops/instrumentation/ibm_watsonx_ai/instrumentor.py +++ b/agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py @@ -12,19 +12,23 @@ - Model.get_details - Model details API """ -from typing import List, Collection +from typing import List, Dict, Any from wrapt import wrap_function_wrapper +from opentelemetry.metrics import Meter from agentops.logging import logger -from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics +from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics, InstrumentorConfig from agentops.instrumentation.common.wrappers import WrapConfig -from agentops.instrumentation.ibm_watsonx_ai.attributes.attributes import ( +from agentops.instrumentation.providers.ibm_watsonx_ai.attributes.attributes import ( get_generate_attributes, get_tokenize_attributes, get_model_details_attributes, get_chat_attributes, ) -from agentops.instrumentation.ibm_watsonx_ai.stream_wrapper import generate_text_stream_wrapper, chat_stream_wrapper +from agentops.instrumentation.providers.ibm_watsonx_ai.stream_wrapper import ( + generate_text_stream_wrapper, + chat_stream_wrapper, +) # Library info for tracer/meter LIBRARY_NAME = "agentops.instrumentation.ibm_watsonx_ai" @@ -82,32 +86,33 @@ class IBMWatsonXInstrumentor(BaseAgentOpsInstrumentor): def __init__(self): """Initialize the IBM watsonx.ai instrumentor.""" - super().__init__( - name="ibm_watsonx_ai", - version=LIBRARY_VERSION, + # Filter out stream methods that need custom wrapping + standard_methods = [ + wc for wc in WRAPPED_METHODS if wc.method_name not in ["generate_text_stream", "chat_stream"] + ] + + config = InstrumentorConfig( library_name=LIBRARY_NAME, + library_version=LIBRARY_VERSION, + wrapped_methods=standard_methods, + metrics_enabled=True, + dependencies=["ibm-watsonx-ai >= 1.3.11"], ) + super().__init__(config) - def instrumentation_dependencies(self) -> Collection[str]: - """Return packages required for instrumentation.""" - return ["ibm-watsonx-ai >= 1.3.11"] + def _create_metrics(self, meter: Meter) -> Dict[str, Any]: + """Create metrics for IBM watsonx.ai operations. - def _get_wrapped_methods(self) -> List[WrapConfig]: - """Return list of methods to be wrapped. + Args: + meter: The OpenTelemetry meter to use for creating metrics. 
- Note: We filter out stream methods here as they need dedicated wrappers. + Returns: + Dictionary containing the created metrics. """ - return [wc for wc in WRAPPED_METHODS if wc.method_name not in ["generate_text_stream", "chat_stream"]] - - def _instrument(self, **kwargs): - """Instrument the IBM watsonx.ai API.""" - # Call parent implementation to handle standard method wrapping - super()._instrument(**kwargs) - - # Create standard metrics for LLM operations - self._metrics = StandardMetrics(self._meter) - self._metrics.create_llm_metrics(system_name="IBM watsonx.ai", operation_description="IBM watsonx.ai operation") + return StandardMetrics.create_standard_metrics(meter) + def _custom_wrap(self, **kwargs): + """Perform custom wrapping for streaming methods.""" # Dedicated wrappers for stream methods try: generate_text_stream_config = next(wc for wc in WRAPPED_METHODS if wc.method_name == "generate_text_stream") @@ -137,11 +142,8 @@ def _instrument(self, **kwargs): logger.info("IBM watsonx.ai instrumentation enabled") - def _uninstrument(self, **kwargs): - """Remove instrumentation from IBM watsonx.ai API.""" - # Call parent implementation to handle standard method unwrapping - super()._uninstrument(**kwargs) - + def _custom_unwrap(self, **kwargs): + """Remove custom wrapping for streaming methods.""" # Unwrap streaming methods manually from opentelemetry.instrumentation.utils import unwrap as otel_unwrap diff --git a/agentops/instrumentation/ibm_watsonx_ai/stream_wrapper.py b/agentops/instrumentation/providers/ibm_watsonx_ai/stream_wrapper.py similarity index 99% rename from agentops/instrumentation/ibm_watsonx_ai/stream_wrapper.py rename to agentops/instrumentation/providers/ibm_watsonx_ai/stream_wrapper.py index 9ff39cf69..f84bca8fd 100644 --- a/agentops/instrumentation/ibm_watsonx_ai/stream_wrapper.py +++ b/agentops/instrumentation/providers/ibm_watsonx_ai/stream_wrapper.py @@ -7,8 +7,8 @@ import json from opentelemetry.trace import get_tracer, SpanKind from agentops.logging import logger -from agentops.instrumentation.ibm_watsonx_ai import LIBRARY_NAME, LIBRARY_VERSION -from agentops.instrumentation.ibm_watsonx_ai.attributes.common import ( +from agentops.instrumentation.providers.ibm_watsonx_ai import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.providers.ibm_watsonx_ai.attributes.common import ( extract_params_attributes, convert_params_to_dict, extract_prompt_from_args, diff --git a/agentops/instrumentation/mem0/__init__.py b/agentops/instrumentation/providers/mem0/__init__.py similarity index 92% rename from agentops/instrumentation/mem0/__init__.py rename to agentops/instrumentation/providers/mem0/__init__.py index 2a8309abb..7e39db016 100644 --- a/agentops/instrumentation/mem0/__init__.py +++ b/agentops/instrumentation/providers/mem0/__init__.py @@ -27,7 +27,7 @@ LIBRARY_VERSION = "1.0.0" # Internal version for instrumentation # Import after defining constants to avoid circular imports -from agentops.instrumentation.mem0.instrumentor import Mem0Instrumentor # noqa: E402 +from agentops.instrumentation.providers.mem0.instrumentor import Mem0Instrumentor # noqa: E402 __all__ = [ "LIBRARY_NAME", diff --git a/agentops/instrumentation/mem0/common.py b/agentops/instrumentation/providers/mem0/common.py similarity index 100% rename from agentops/instrumentation/mem0/common.py rename to agentops/instrumentation/providers/mem0/common.py diff --git a/agentops/instrumentation/mem0/instrumentor.py b/agentops/instrumentation/providers/mem0/instrumentor.py similarity index 
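The IBM watsonx.ai instrumentor keeps a single `WRAPPED_METHODS` list but hands only the non-streaming entries to the base class, then pulls the streaming `WrapConfig`s back out by name inside `_custom_wrap`. A small sketch of that split, assuming only the `method_name` field used in the diff:

```python
from typing import List, Tuple

from agentops.instrumentation.common import WrapConfig

# Streaming endpoints that need hand-written wrappers instead of the generic one.
STREAM_METHOD_NAMES = {"generate_text_stream", "chat_stream"}


def split_wrapped_methods(all_methods: List[WrapConfig]) -> Tuple[List[WrapConfig], List[WrapConfig]]:
    """Illustrative: separate standard wrapping targets from streaming ones."""
    standard = [wc for wc in all_methods if wc.method_name not in STREAM_METHOD_NAMES]
    streaming = [wc for wc in all_methods if wc.method_name in STREAM_METHOD_NAMES]
    return standard, streaming
```

In the hunk above, the streaming configs are then looked up with `next(wc for wc in WRAPPED_METHODS if wc.method_name == "generate_text_stream")` and wired to `generate_text_stream_wrapper` and `chat_stream_wrapper`.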
77% rename from agentops/instrumentation/mem0/instrumentor.py rename to agentops/instrumentation/providers/mem0/instrumentor.py index 3933771c3..124d84de1 100644 --- a/agentops/instrumentation/mem0/instrumentor.py +++ b/agentops/instrumentation/providers/mem0/instrumentor.py @@ -1,8 +1,8 @@ -from typing import Collection, List +from typing import Dict, Any from wrapt import wrap_function_wrapper +from opentelemetry.metrics import Meter -from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics -from agentops.instrumentation.common.wrappers import WrapConfig +from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics, InstrumentorConfig from agentops.logging import logger # Import from refactored structure @@ -196,69 +196,50 @@ class Mem0Instrumentor(BaseAgentOpsInstrumentor): def __init__(self): """Initialize the Mem0 instrumentor.""" - super().__init__( - name="mem0", - version=LIBRARY_VERSION, + config = InstrumentorConfig( library_name=LIBRARY_NAME, + library_version=LIBRARY_VERSION, + wrapped_methods=[], # We use custom wrapping for Mem0 + metrics_enabled=True, + dependencies=["mem0ai >= 0.1.10"], ) + super().__init__(config) - def instrumentation_dependencies(self) -> Collection[str]: - """Return packages required for instrumentation. + def _create_metrics(self, meter: Meter) -> Dict[str, Any]: + """Create metrics for Mem0 operations. + + Args: + meter: The OpenTelemetry meter to use for creating metrics. Returns: - A collection of package specifications required for this instrumentation. + Dictionary containing the created metrics. """ - return ["mem0ai >= 0.1.10"] - - def _get_wrapped_methods(self) -> List[WrapConfig]: - """Return list of methods to be wrapped. + metrics = StandardMetrics.create_standard_metrics(meter) - For Mem0, we don't use the standard wrapping mechanism - since we're using specialized wrappers instead. - """ - return [] + # Add Mem0-specific memory count metric + metrics["memory_count_histogram"] = meter.create_histogram( + name="mem0.memory.count", + unit="memory", + description="Number of memories processed in Mem0 operations", + ) - def _instrument(self, **kwargs): - """Instrument the Mem0 Memory API. + return metrics - This method wraps the key methods in the Mem0 Memory client to capture - telemetry data for memory operations. It sets up tracers, meters, and wraps the - appropriate methods for instrumentation. + def _custom_wrap(self, **kwargs): + """Perform custom wrapping for Mem0 methods. Args: **kwargs: Configuration options for instrumentation. 
""" - # Note: We don't call super()._instrument() here because we're not using - # the standard wrapping mechanism for this special instrumentor - logger.debug("Starting Mem0 instrumentation...") - # Get tracer and meter from base class properties - self._tracer_provider = kwargs.get("tracer_provider") - self._meter_provider = kwargs.get("meter_provider") - - # Initialize tracer and meter (these are set by base class properties) - tracer = self._tracer - meter = self._meter - - # Create standard metrics for memory operations - self._metrics = StandardMetrics(meter) - self._metrics.create_llm_metrics(system_name="Mem0", operation_description="Mem0 memory operation") - - # Create additional metrics specific to memory operations - meter.create_histogram( - name="mem0.memory.count", - unit="memory", - description="Number of memories processed in Mem0 operations", - ) - # Use specialized wrappers that ensure proper context hierarchy for method_config in WRAPPER_METHODS: try: package = method_config["package"] class_method = method_config["class_method"] wrapper_func = method_config["wrapper"] - wrap_function_wrapper(package, class_method, wrapper_func(tracer)) + wrap_function_wrapper(package, class_method, wrapper_func(self._tracer)) except (AttributeError, ModuleNotFoundError) as e: # Use debug level for missing optional packages instead of error # since LLM providers are optional dependencies @@ -269,18 +250,12 @@ def _instrument(self, **kwargs): logger.info("Mem0 instrumentation enabled") - def _uninstrument(self, **kwargs): - """Remove instrumentation from Mem0 Memory API. - - This method unwraps all methods that were wrapped during instrumentation, - restoring the original behavior of the Mem0 Memory API. + def _custom_unwrap(self, **kwargs): + """Remove custom wrapping for Mem0 methods. Args: **kwargs: Configuration options for uninstrumentation. 
""" - # Note: We don't call super()._uninstrument() here because we're not using - # the standard wrapping mechanism for this special instrumentor - # Unwrap specialized methods from opentelemetry.instrumentation.utils import unwrap diff --git a/agentops/instrumentation/mem0/memory.py b/agentops/instrumentation/providers/mem0/memory.py similarity index 100% rename from agentops/instrumentation/mem0/memory.py rename to agentops/instrumentation/providers/mem0/memory.py diff --git a/agentops/instrumentation/openai/__init__.py b/agentops/instrumentation/providers/openai/__init__.py similarity index 84% rename from agentops/instrumentation/openai/__init__.py rename to agentops/instrumentation/providers/openai/__init__.py index 3e160530b..fbe903e09 100644 --- a/agentops/instrumentation/openai/__init__.py +++ b/agentops/instrumentation/providers/openai/__init__.py @@ -12,7 +12,7 @@ LIBRARY_VERSION = _library_info.version # Import after defining constants to avoid circular imports -from agentops.instrumentation.openai.instrumentor import OpenAIInstrumentor # noqa: E402 +from agentops.instrumentation.providers.openai.instrumentor import OpenAIInstrumentor # noqa: E402 __all__ = [ "LIBRARY_NAME", diff --git a/agentops/instrumentation/openai/attributes/__init__.py b/agentops/instrumentation/providers/openai/attributes/__init__.py similarity index 100% rename from agentops/instrumentation/openai/attributes/__init__.py rename to agentops/instrumentation/providers/openai/attributes/__init__.py diff --git a/agentops/instrumentation/openai/attributes/common.py b/agentops/instrumentation/providers/openai/attributes/common.py similarity index 92% rename from agentops/instrumentation/openai/attributes/common.py rename to agentops/instrumentation/providers/openai/attributes/common.py index f7f651d97..abb1a8702 100644 --- a/agentops/instrumentation/openai/attributes/common.py +++ b/agentops/instrumentation/providers/openai/attributes/common.py @@ -1,9 +1,9 @@ from typing import Optional, Tuple, Dict from agentops.logging import logger from agentops.semconv import InstrumentationAttributes -from agentops.instrumentation.openai import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.providers.openai import LIBRARY_NAME, LIBRARY_VERSION from agentops.instrumentation.common.attributes import AttributeMap, get_common_attributes -from agentops.instrumentation.openai.attributes.response import ( +from agentops.instrumentation.providers.openai.attributes.response import ( get_response_kwarg_attributes, get_response_response_attributes, ) diff --git a/agentops/instrumentation/openai/attributes/response.py b/agentops/instrumentation/providers/openai/attributes/response.py similarity index 100% rename from agentops/instrumentation/openai/attributes/response.py rename to agentops/instrumentation/providers/openai/attributes/response.py diff --git a/agentops/instrumentation/openai/attributes/tools.py b/agentops/instrumentation/providers/openai/attributes/tools.py similarity index 100% rename from agentops/instrumentation/openai/attributes/tools.py rename to agentops/instrumentation/providers/openai/attributes/tools.py diff --git a/agentops/instrumentation/openai/config.py b/agentops/instrumentation/providers/openai/config.py similarity index 100% rename from agentops/instrumentation/openai/config.py rename to agentops/instrumentation/providers/openai/config.py diff --git a/agentops/instrumentation/openai/instrumentor.py b/agentops/instrumentation/providers/openai/instrumentor.py similarity index 95% rename from 
agentops/instrumentation/openai/instrumentor.py rename to agentops/instrumentation/providers/openai/instrumentor.py index 3371e4b12..1efbe0802 100644 --- a/agentops/instrumentation/openai/instrumentor.py +++ b/agentops/instrumentation/providers/openai/instrumentor.py @@ -21,11 +21,11 @@ StandardMetrics, MetricsRecorder, ) -from agentops.instrumentation.openai import LIBRARY_NAME, LIBRARY_VERSION -from agentops.instrumentation.openai.attributes.common import get_response_attributes -from agentops.instrumentation.openai.config import Config -from agentops.instrumentation.openai.utils import is_openai_v1 -from agentops.instrumentation.openai.wrappers import ( +from agentops.instrumentation.providers.openai import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.providers.openai.attributes.common import get_response_attributes +from agentops.instrumentation.providers.openai.config import Config +from agentops.instrumentation.providers.openai.utils import is_openai_v1 +from agentops.instrumentation.providers.openai.wrappers import ( handle_chat_attributes, handle_completion_attributes, handle_embeddings_attributes, @@ -36,7 +36,7 @@ handle_run_stream_attributes, handle_messages_attributes, ) -from agentops.instrumentation.openai.v0 import OpenAIV0Instrumentor +from agentops.instrumentation.providers.openai.v0 import OpenAIV0Instrumentor from agentops.semconv import Meters from opentelemetry.metrics import Meter diff --git a/agentops/instrumentation/openai/utils.py b/agentops/instrumentation/providers/openai/utils.py similarity index 93% rename from agentops/instrumentation/openai/utils.py rename to agentops/instrumentation/providers/openai/utils.py index 3eb0e7fbd..5db988a3e 100644 --- a/agentops/instrumentation/openai/utils.py +++ b/agentops/instrumentation/providers/openai/utils.py @@ -7,7 +7,7 @@ import os from importlib.metadata import version -from agentops.instrumentation.openai.config import Config +from agentops.instrumentation.providers.openai.config import Config # Get OpenAI version try: diff --git a/agentops/instrumentation/openai/v0.py b/agentops/instrumentation/providers/openai/v0.py similarity index 96% rename from agentops/instrumentation/openai/v0.py rename to agentops/instrumentation/providers/openai/v0.py index 5762a11f8..b6ee17376 100644 --- a/agentops/instrumentation/openai/v0.py +++ b/agentops/instrumentation/providers/openai/v0.py @@ -10,12 +10,12 @@ from opentelemetry.metrics import get_meter from wrapt import wrap_function_wrapper -from agentops.instrumentation.openai import LIBRARY_NAME, LIBRARY_VERSION -from agentops.instrumentation.openai.utils import is_metrics_enabled +from agentops.instrumentation.providers.openai import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.providers.openai.utils import is_metrics_enabled from agentops.semconv import Meters # Import our wrappers -from agentops.instrumentation.openai.v0_wrappers import ( +from agentops.instrumentation.providers.openai.v0_wrappers import ( chat_wrapper, achat_wrapper, completion_wrapper, diff --git a/agentops/instrumentation/openai/v0_wrappers.py b/agentops/instrumentation/providers/openai/v0_wrappers.py similarity index 99% rename from agentops/instrumentation/openai/v0_wrappers.py rename to agentops/instrumentation/providers/openai/v0_wrappers.py index 6c445c47b..1b88a008d 100644 --- a/agentops/instrumentation/openai/v0_wrappers.py +++ b/agentops/instrumentation/providers/openai/v0_wrappers.py @@ -12,8 +12,8 @@ from opentelemetry import context as context_api from 
opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY -from agentops.instrumentation.openai.utils import is_metrics_enabled -from agentops.instrumentation.openai.wrappers.shared import should_send_prompts +from agentops.instrumentation.providers.openai.utils import is_metrics_enabled +from agentops.instrumentation.providers.openai.wrappers.shared import should_send_prompts from agentops.semconv import SpanAttributes diff --git a/agentops/instrumentation/openai/wrappers/__init__.py b/agentops/instrumentation/providers/openai/wrappers/__init__.py similarity index 56% rename from agentops/instrumentation/openai/wrappers/__init__.py rename to agentops/instrumentation/providers/openai/wrappers/__init__.py index ed9bd6a58..5348bd91d 100644 --- a/agentops/instrumentation/openai/wrappers/__init__.py +++ b/agentops/instrumentation/providers/openai/wrappers/__init__.py @@ -3,11 +3,11 @@ This package contains wrapper implementations for different OpenAI API endpoints. """ -from agentops.instrumentation.openai.wrappers.chat import handle_chat_attributes -from agentops.instrumentation.openai.wrappers.completion import handle_completion_attributes -from agentops.instrumentation.openai.wrappers.embeddings import handle_embeddings_attributes -from agentops.instrumentation.openai.wrappers.image_gen import handle_image_gen_attributes -from agentops.instrumentation.openai.wrappers.assistant import ( +from agentops.instrumentation.providers.openai.wrappers.chat import handle_chat_attributes +from agentops.instrumentation.providers.openai.wrappers.completion import handle_completion_attributes +from agentops.instrumentation.providers.openai.wrappers.embeddings import handle_embeddings_attributes +from agentops.instrumentation.providers.openai.wrappers.image_gen import handle_image_gen_attributes +from agentops.instrumentation.providers.openai.wrappers.assistant import ( handle_assistant_attributes, handle_run_attributes, handle_run_retrieve_attributes, diff --git a/agentops/instrumentation/openai/wrappers/assistant.py b/agentops/instrumentation/providers/openai/wrappers/assistant.py similarity index 98% rename from agentops/instrumentation/openai/wrappers/assistant.py rename to agentops/instrumentation/providers/openai/wrappers/assistant.py index 011f29a30..ad7ba662b 100644 --- a/agentops/instrumentation/openai/wrappers/assistant.py +++ b/agentops/instrumentation/providers/openai/wrappers/assistant.py @@ -7,12 +7,12 @@ import logging from typing import Any, Dict, Optional, Tuple -from agentops.instrumentation.openai.utils import is_openai_v1 -from agentops.instrumentation.openai.wrappers.shared import ( +from agentops.instrumentation.providers.openai.utils import is_openai_v1 +from agentops.instrumentation.providers.openai.wrappers.shared import ( model_as_dict, should_send_prompts, ) -from agentops.instrumentation.openai.config import Config +from agentops.instrumentation.providers.openai.config import Config from agentops.instrumentation.common.attributes import AttributeMap from agentops.semconv import SpanAttributes diff --git a/agentops/instrumentation/openai/wrappers/chat.py b/agentops/instrumentation/providers/openai/wrappers/chat.py similarity index 98% rename from agentops/instrumentation/openai/wrappers/chat.py rename to agentops/instrumentation/providers/openai/wrappers/chat.py index bec136332..65b5e9181 100644 --- a/agentops/instrumentation/openai/wrappers/chat.py +++ b/agentops/instrumentation/providers/openai/wrappers/chat.py @@ -8,8 +8,8 @@ import logging from typing import 
Any, Dict, Optional, Tuple -from agentops.instrumentation.openai.utils import is_openai_v1 -from agentops.instrumentation.openai.wrappers.shared import ( +from agentops.instrumentation.providers.openai.utils import is_openai_v1 +from agentops.instrumentation.providers.openai.wrappers.shared import ( model_as_dict, should_send_prompts, ) diff --git a/agentops/instrumentation/openai/wrappers/completion.py b/agentops/instrumentation/providers/openai/wrappers/completion.py similarity index 96% rename from agentops/instrumentation/openai/wrappers/completion.py rename to agentops/instrumentation/providers/openai/wrappers/completion.py index 0a1f0512b..b7666e714 100644 --- a/agentops/instrumentation/openai/wrappers/completion.py +++ b/agentops/instrumentation/providers/openai/wrappers/completion.py @@ -6,8 +6,8 @@ import logging from typing import Any, Dict, Optional, Tuple -from agentops.instrumentation.openai.utils import is_openai_v1 -from agentops.instrumentation.openai.wrappers.shared import ( +from agentops.instrumentation.providers.openai.utils import is_openai_v1 +from agentops.instrumentation.providers.openai.wrappers.shared import ( model_as_dict, should_send_prompts, ) diff --git a/agentops/instrumentation/openai/wrappers/embeddings.py b/agentops/instrumentation/providers/openai/wrappers/embeddings.py similarity index 96% rename from agentops/instrumentation/openai/wrappers/embeddings.py rename to agentops/instrumentation/providers/openai/wrappers/embeddings.py index 3c8da41af..61de2f563 100644 --- a/agentops/instrumentation/openai/wrappers/embeddings.py +++ b/agentops/instrumentation/providers/openai/wrappers/embeddings.py @@ -6,8 +6,8 @@ import logging from typing import Any, Dict, Optional, Tuple -from agentops.instrumentation.openai.utils import is_openai_v1 -from agentops.instrumentation.openai.wrappers.shared import ( +from agentops.instrumentation.providers.openai.utils import is_openai_v1 +from agentops.instrumentation.providers.openai.wrappers.shared import ( model_as_dict, should_send_prompts, ) diff --git a/agentops/instrumentation/openai/wrappers/image_gen.py b/agentops/instrumentation/providers/openai/wrappers/image_gen.py similarity index 96% rename from agentops/instrumentation/openai/wrappers/image_gen.py rename to agentops/instrumentation/providers/openai/wrappers/image_gen.py index 4fd4aa211..ccbc2a0c1 100644 --- a/agentops/instrumentation/openai/wrappers/image_gen.py +++ b/agentops/instrumentation/providers/openai/wrappers/image_gen.py @@ -6,7 +6,7 @@ import logging from typing import Any, Dict, Optional, Tuple -from agentops.instrumentation.openai.wrappers.shared import model_as_dict +from agentops.instrumentation.providers.openai.wrappers.shared import model_as_dict from agentops.instrumentation.common.attributes import AttributeMap from agentops.semconv import SpanAttributes diff --git a/agentops/instrumentation/openai/wrappers/shared.py b/agentops/instrumentation/providers/openai/wrappers/shared.py similarity index 93% rename from agentops/instrumentation/openai/wrappers/shared.py rename to agentops/instrumentation/providers/openai/wrappers/shared.py index c969437f1..29989a91e 100644 --- a/agentops/instrumentation/openai/wrappers/shared.py +++ b/agentops/instrumentation/providers/openai/wrappers/shared.py @@ -13,7 +13,7 @@ import openai from opentelemetry import context as context_api -from agentops.instrumentation.openai.utils import is_openai_v1 +from agentops.instrumentation.providers.openai.utils import is_openai_v1 logger = logging.getLogger(__name__) @@ 
-56,7 +56,7 @@ def model_as_dict(model: Any) -> Dict[str, Any]: def get_token_count_from_string(string: str, model_name: str) -> Optional[int]: """Get token count from a string using tiktoken.""" - from agentops.instrumentation.openai.utils import should_record_stream_token_usage + from agentops.instrumentation.providers.openai.utils import should_record_stream_token_usage if not should_record_stream_token_usage(): return None diff --git a/agentops/instrumentation/concurrent_futures/__init__.py b/agentops/instrumentation/utilities/concurrent_futures/__init__.py similarity index 100% rename from agentops/instrumentation/concurrent_futures/__init__.py rename to agentops/instrumentation/utilities/concurrent_futures/__init__.py diff --git a/agentops/instrumentation/concurrent_futures/instrumentation.py b/agentops/instrumentation/utilities/concurrent_futures/instrumentation.py similarity index 100% rename from agentops/instrumentation/concurrent_futures/instrumentation.py rename to agentops/instrumentation/utilities/concurrent_futures/instrumentation.py diff --git a/examples/anthropic/anthropic-example-async.py b/examples/anthropic/anthropic-example-async.py index b4ea04a54..f1bff79a5 100644 --- a/examples/anthropic/anthropic-example-async.py +++ b/examples/anthropic/anthropic-example-async.py @@ -106,5 +106,5 @@ async def main(): # Run the main function -# await main() +asyncio.run(main()) # We can observe the trace in the AgentOps dashboard by going to the trace URL provided above. diff --git a/examples/google_adk/human_approval.py b/examples/google_adk/human_approval.py index 69977ead8..1cbe55e7f 100644 --- a/examples/google_adk/human_approval.py +++ b/examples/google_adk/human_approval.py @@ -21,6 +21,7 @@ import nest_asyncio import agentops from dotenv import load_dotenv +import asyncio # ## 2. Configuration and Initialization # Load environment variables (especially `AGENTOPS_API_KEY` and your Google API key for Gemini) and initialize AgentOps. @@ -203,7 +204,7 @@ async def main_notebook(): try: - # asyncio.run(main_notebook()) + asyncio.run(main_notebook()) agentops.end_trace(end_state="Success") except Exception as e: print(f"Error: {e}") diff --git a/examples/openai/openai_example_async.py b/examples/openai/openai_example_async.py index 7356adba9..002893172 100644 --- a/examples/openai/openai_example_async.py +++ b/examples/openai/openai_example_async.py @@ -13,6 +13,7 @@ import agentops import os from dotenv import load_dotenv +import asyncio # Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables load_dotenv() @@ -73,7 +74,7 @@ async def main_stream(): print(chunk.choices[0].delta.content or "", end="") -# await main_stream() +asyncio.run(main_stream()) agentops.end_trace(tracer, end_state="Success") # Note that the response is a generator that yields chunks of the story. We can track this with AgentOps by navigating to the trace url and viewing the run. 
diff --git a/examples/openai_agents/agent_patterns.py b/examples/openai_agents/agent_patterns.py index c8a6b2055..5482fc7b4 100644 --- a/examples/openai_agents/agent_patterns.py +++ b/examples/openai_agents/agent_patterns.py @@ -717,7 +717,9 @@ async def run_streaming_guardrails_demo(): # Run the demo -# await run_streaming_guardrails_demo() +if __name__ == "__main__": + # Run the streaming guardrails demo + asyncio.run(run_streaming_guardrails_demo()) # End the AgentOps trace session agentops.end_trace(tracer, end_state="Success") diff --git a/examples/smolagents/multi_smolagents_system.py b/examples/smolagents/multi_smolagents_system.py index c14740b30..fe2c7a254 100644 --- a/examples/smolagents/multi_smolagents_system.py +++ b/examples/smolagents/multi_smolagents_system.py @@ -96,8 +96,10 @@ def visit_webpage(url: str) -> str: tools=[DuckDuckGoSearchTool(), visit_webpage], model=model, ) - +abc = "Manager Agent" manager_agent = CodeAgent( + name=abc, + description="This agent manages the web search agent and coordinates its tasks.", tools=[], model=model, managed_agents=[web_agent], diff --git a/tests/unit/instrumentation/anthropic/test_attributes.py b/tests/unit/instrumentation/anthropic/test_attributes.py index 6354af2ea..a7d634d9c 100644 --- a/tests/unit/instrumentation/anthropic/test_attributes.py +++ b/tests/unit/instrumentation/anthropic/test_attributes.py @@ -7,16 +7,16 @@ ToolAttributes, ToolStatus, ) -from agentops.instrumentation.anthropic.attributes.common import ( +from agentops.instrumentation.providers.anthropic.attributes.common import ( get_common_instrumentation_attributes, extract_request_attributes, ) -from agentops.instrumentation.anthropic.attributes.message import ( +from agentops.instrumentation.providers.anthropic.attributes.message import ( get_message_request_attributes, get_stream_attributes, get_stream_event_attributes, ) -from agentops.instrumentation.anthropic.attributes.tools import ( +from agentops.instrumentation.providers.anthropic.attributes.tools import ( extract_tool_definitions, extract_tool_use_blocks, get_tool_attributes, diff --git a/tests/unit/instrumentation/anthropic/test_event_handler.py b/tests/unit/instrumentation/anthropic/test_event_handler.py index 18830ff92..2b6f23833 100644 --- a/tests/unit/instrumentation/anthropic/test_event_handler.py +++ b/tests/unit/instrumentation/anthropic/test_event_handler.py @@ -1,7 +1,7 @@ from unittest.mock import MagicMock from opentelemetry.trace import Span -from agentops.instrumentation.anthropic.event_handler_wrapper import EventHandleWrapper +from agentops.instrumentation.providers.anthropic.event_handler_wrapper import EventHandleWrapper from agentops.semconv import CoreAttributes diff --git a/tests/unit/instrumentation/anthropic/test_instrumentor.py b/tests/unit/instrumentation/anthropic/test_instrumentor.py index 3123fb80e..eae26b6f7 100644 --- a/tests/unit/instrumentation/anthropic/test_instrumentor.py +++ b/tests/unit/instrumentation/anthropic/test_instrumentor.py @@ -1,7 +1,7 @@ from unittest.mock import patch, MagicMock, ANY -from agentops.instrumentation.anthropic.instrumentor import AnthropicInstrumentor -from agentops.instrumentation.anthropic import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.providers.anthropic.instrumentor import AnthropicInstrumentor +from agentops.instrumentation.providers.anthropic import LIBRARY_NAME, LIBRARY_VERSION def test_instrumentor_initialization(): @@ -62,7 +62,7 @@ def test_instrumentor_wraps_methods(mock_tracer, mock_meter): 
patch("agentops.instrumentation.common.instrumentor.get_tracer", return_value=mock_tracer), patch("agentops.instrumentation.common.instrumentor.get_meter", return_value=mock_meter), patch("agentops.instrumentation.common.wrappers.wrap_function_wrapper") as mock_wrap_function, - patch("agentops.instrumentation.anthropic.instrumentor.wrap_function_wrapper") as mock_stream_wrap, + patch("agentops.instrumentation.providers.anthropic.instrumentor.wrap_function_wrapper") as mock_stream_wrap, ): instrumentor._instrument() @@ -106,10 +106,10 @@ def test_instrumentor_uninstrument(mock_tracer, mock_meter): patch("agentops.instrumentation.common.instrumentor.get_meter", return_value=mock_meter), patch("agentops.instrumentation.common.instrumentor.unwrap", mock_unwrap), # Patch where it's imported patch( - "agentops.instrumentation.anthropic.instrumentor.otel_unwrap" + "agentops.instrumentation.providers.anthropic.instrumentor.otel_unwrap" ) as mock_otel_unwrap, # Patch in anthropic module patch("agentops.instrumentation.common.wrappers.wrap_function_wrapper"), - patch("agentops.instrumentation.anthropic.instrumentor.wrap_function_wrapper"), + patch("agentops.instrumentation.providers.anthropic.instrumentor.wrap_function_wrapper"), ): # Instrument first instrumentor._instrument() @@ -136,7 +136,7 @@ def test_instrumentor_handles_missing_methods(mock_tracer, mock_meter): patch("agentops.instrumentation.common.instrumentor.get_tracer", return_value=mock_tracer), patch("agentops.instrumentation.common.instrumentor.get_meter", return_value=mock_meter), patch("agentops.instrumentation.common.wrappers.wrap", mock_wrap), - patch("agentops.instrumentation.anthropic.instrumentor.wrap_function_wrapper", mock_wrap_function), + patch("agentops.instrumentation.providers.anthropic.instrumentor.wrap_function_wrapper", mock_wrap_function), ): # Should not raise exceptions even when wrapping fails instrumentor._instrument() diff --git a/tests/unit/instrumentation/anthropic/test_stream_wrapper.py b/tests/unit/instrumentation/anthropic/test_stream_wrapper.py index 055f64405..c20028234 100644 --- a/tests/unit/instrumentation/anthropic/test_stream_wrapper.py +++ b/tests/unit/instrumentation/anthropic/test_stream_wrapper.py @@ -2,7 +2,7 @@ from unittest.mock import MagicMock from opentelemetry.trace import SpanKind -from agentops.instrumentation.anthropic.stream_wrapper import ( +from agentops.instrumentation.providers.anthropic.stream_wrapper import ( messages_stream_wrapper, messages_stream_async_wrapper, AsyncStreamContextManagerWrapper, diff --git a/tests/unit/instrumentation/openai_agents/test_openai_agents.py b/tests/unit/instrumentation/openai_agents/test_openai_agents.py index 57421c538..f68420cb8 100644 --- a/tests/unit/instrumentation/openai_agents/test_openai_agents.py +++ b/tests/unit/instrumentation/openai_agents/test_openai_agents.py @@ -20,9 +20,9 @@ from unittest.mock import MagicMock, patch from opentelemetry.trace import StatusCode -from agentops.instrumentation.openai_agents.instrumentor import OpenAIAgentsInstrumentor -from agentops.instrumentation.openai_agents.exporter import OpenAIAgentsExporter -from agentops.instrumentation.openai_agents.processor import OpenAIAgentsProcessor +from agentops.instrumentation.agentic.openai_agents.instrumentor import OpenAIAgentsInstrumentor +from agentops.instrumentation.agentic.openai_agents.exporter import OpenAIAgentsExporter +from agentops.instrumentation.agentic.openai_agents.processor import OpenAIAgentsProcessor from agentops.semconv import ( SpanAttributes, 
MessageAttributes, @@ -106,7 +106,7 @@ def test_response_api_span_serialization(self, instrumentation): # Mock the attribute extraction functions to return the expected message attributes with patch( - "agentops.instrumentation.openai_agents.attributes.completion.get_raw_response_attributes" + "agentops.instrumentation.agentic.openai_agents.attributes.completion.get_raw_response_attributes" ) as mock_response_attrs: # Set up the mock to return attributes we want to verify mock_response_attrs.return_value = { @@ -138,7 +138,7 @@ def test_response_api_span_serialization(self, instrumentation): # Process the mock span with the exporter with patch( - "agentops.instrumentation.openai_agents.attributes.completion.get_generation_output_attributes" + "agentops.instrumentation.agentic.openai_agents.attributes.completion.get_generation_output_attributes" ) as mock_gen_output: mock_gen_output.return_value = mock_response_attrs.return_value process_with_instrumentor(mock_span, OpenAIAgentsExporter, captured_attributes) @@ -176,7 +176,7 @@ def test_tool_calls_span_serialization(self, instrumentation): """ # Mock the attribute extraction functions to return the expected message attributes with patch( - "agentops.instrumentation.openai_agents.attributes.completion.get_raw_response_attributes" + "agentops.instrumentation.agentic.openai_agents.attributes.completion.get_raw_response_attributes" ) as mock_response_attrs: # Set up the mock to return attributes we want to verify mock_response_attrs.return_value = { @@ -215,7 +215,7 @@ def test_tool_calls_span_serialization(self, instrumentation): # Process the mock span with the exporter with patch( - "agentops.instrumentation.openai_agents.attributes.completion.get_generation_output_attributes" + "agentops.instrumentation.agentic.openai_agents.attributes.completion.get_generation_output_attributes" ) as mock_gen_output: mock_gen_output.return_value = mock_response_attrs.return_value process_with_instrumentor(mock_span, OpenAIAgentsExporter, captured_attributes) diff --git a/tests/unit/instrumentation/openai_agents/test_openai_agents_attributes.py b/tests/unit/instrumentation/openai_agents/test_openai_agents_attributes.py index 35085eef7..d05f79565 100644 --- a/tests/unit/instrumentation/openai_agents/test_openai_agents_attributes.py +++ b/tests/unit/instrumentation/openai_agents/test_openai_agents_attributes.py @@ -11,10 +11,10 @@ import pytest from unittest.mock import MagicMock, patch -from agentops.instrumentation.openai_agents import LIBRARY_NAME +from agentops.instrumentation.agentic.openai_agents import LIBRARY_NAME # Import common attribute functions -from agentops.instrumentation.openai_agents.attributes.common import ( +from agentops.instrumentation.agentic.openai_agents.attributes.common import ( get_agent_span_attributes, get_function_span_attributes, get_generation_span_attributes, @@ -25,18 +25,18 @@ ) # Import model-related functions -from agentops.instrumentation.openai_agents.attributes.model import ( +from agentops.instrumentation.agentic.openai_agents.attributes.model import ( get_model_attributes, ) # Import completion processing functions -from agentops.instrumentation.openai_agents.attributes.completion import ( +from agentops.instrumentation.agentic.openai_agents.attributes.completion import ( get_chat_completions_attributes, get_raw_response_attributes, ) # Import token processing functions -from agentops.instrumentation.openai_agents.attributes.tokens import ( +from agentops.instrumentation.agentic.openai_agents.attributes.tokens 
import ( process_token_usage, extract_nested_usage, get_token_metric_attributes, @@ -131,8 +131,8 @@ def default(self, obj): with patch("json.dumps", side_effect=json_dumps_wrapper): with patch("importlib.metadata.version", return_value="1.0.0"): - with patch("agentops.instrumentation.openai_agents.LIBRARY_NAME", "openai"): - with patch("agentops.instrumentation.openai_agents.LIBRARY_VERSION", "1.0.0"): + with patch("agentops.instrumentation.agentic.openai_agents.LIBRARY_NAME", "openai"): + with patch("agentops.instrumentation.agentic.openai_agents.LIBRARY_VERSION", "1.0.0"): yield @@ -303,7 +303,7 @@ def __init__(self): # Patch the model_to_dict function to avoid circular references with patch( - "agentops.instrumentation.openai_agents.attributes.completion.model_to_dict", + "agentops.instrumentation.agentic.openai_agents.attributes.completion.model_to_dict", side_effect=lambda x: x if isinstance(x, dict) else {}, ): # Extract attributes diff --git a/tests/unit/instrumentation/openai_core/test_common_attributes.py b/tests/unit/instrumentation/openai_core/test_common_attributes.py index 45ea06960..b1f881fb4 100644 --- a/tests/unit/instrumentation/openai_core/test_common_attributes.py +++ b/tests/unit/instrumentation/openai_core/test_common_attributes.py @@ -8,11 +8,11 @@ from unittest.mock import patch -from agentops.instrumentation.openai.attributes.common import ( +from agentops.instrumentation.providers.openai.attributes.common import ( get_common_instrumentation_attributes, get_response_attributes, ) -from agentops.instrumentation.openai import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.providers.openai import LIBRARY_NAME, LIBRARY_VERSION from agentops.semconv import SpanAttributes, MessageAttributes, InstrumentationAttributes @@ -54,7 +54,7 @@ def test_get_response_attributes_with_kwargs(self): # Mock the kwarg extraction function with patch( - "agentops.instrumentation.openai.attributes.common.get_response_kwarg_attributes" + "agentops.instrumentation.providers.openai.attributes.common.get_response_kwarg_attributes" ) as mock_kwarg_attributes: mock_kwarg_attributes.return_value = { MessageAttributes.PROMPT_ROLE.format(i=0): "user", @@ -101,7 +101,7 @@ def test_get_response_attributes_with_return_value(self): ) # Use direct patching of Response class check instead - with patch("agentops.instrumentation.openai.attributes.common.Response", MockResponse): + with patch("agentops.instrumentation.providers.openai.attributes.common.Response", MockResponse): # Call the function attributes = get_response_attributes(return_value=response) @@ -145,7 +145,7 @@ def test_get_response_attributes_with_both(self): ) # Instead of mocking the internal functions, test the integration directly - with patch("agentops.instrumentation.openai.attributes.common.Response", MockResponse): + with patch("agentops.instrumentation.providers.openai.attributes.common.Response", MockResponse): # Call the function attributes = get_response_attributes(kwargs=kwargs, return_value=response) @@ -159,7 +159,7 @@ def test_get_response_attributes_with_unexpected_return_type(self): not_a_response = "not a response" # Should log a debug message but not raise an exception - with patch("agentops.instrumentation.openai.attributes.common.logger.debug") as mock_logger: + with patch("agentops.instrumentation.providers.openai.attributes.common.logger.debug") as mock_logger: # Call the function attributes = get_response_attributes(return_value=not_a_response) diff --git 
a/tests/unit/instrumentation/openai_core/test_instrumentor.py b/tests/unit/instrumentation/openai_core/test_instrumentor.py index 1bc04fa70..63e9b1ea4 100644 --- a/tests/unit/instrumentation/openai_core/test_instrumentor.py +++ b/tests/unit/instrumentation/openai_core/test_instrumentor.py @@ -15,7 +15,7 @@ from unittest.mock import MagicMock, patch -from agentops.instrumentation.openai.instrumentor import OpenAIInstrumentor +from agentops.instrumentation.providers.openai.instrumentor import OpenAIInstrumentor from agentops.instrumentation.common.wrappers import WrapConfig diff --git a/tests/unit/instrumentation/openai_core/test_response_attributes.py b/tests/unit/instrumentation/openai_core/test_response_attributes.py index 660302ab0..24809423f 100644 --- a/tests/unit/instrumentation/openai_core/test_response_attributes.py +++ b/tests/unit/instrumentation/openai_core/test_response_attributes.py @@ -10,7 +10,7 @@ import os from unittest.mock import MagicMock, patch -from agentops.instrumentation.openai.attributes.response import ( +from agentops.instrumentation.providers.openai.attributes.response import ( get_response_kwarg_attributes, get_response_response_attributes, get_response_output_attributes, @@ -284,7 +284,7 @@ def test_get_response_kwarg_attributes_with_unsupported_input(self): } # Should not raise an exception but log a debug message - with patch("agentops.instrumentation.openai.attributes.response.logger.debug") as mock_logger: + with patch("agentops.instrumentation.providers.openai.attributes.response.logger.debug") as mock_logger: attributes = get_response_kwarg_attributes(kwargs) # Verify the debug message was logged @@ -330,8 +330,12 @@ def test_get_response_response_attributes(self): ) # Patch the Response and other type checks for simpler testing - with patch("agentops.instrumentation.openai.attributes.response.ResponseOutputMessage", MockOutputMessage): - with patch("agentops.instrumentation.openai.attributes.response.ResponseOutputText", MockOutputText): + with patch( + "agentops.instrumentation.providers.openai.attributes.response.ResponseOutputMessage", MockOutputMessage + ): + with patch( + "agentops.instrumentation.providers.openai.attributes.response.ResponseOutputText", MockOutputText + ): # Extract attributes attributes = get_response_response_attributes(mock_response) @@ -357,10 +361,15 @@ def test_get_response_output_attributes_simple(self): output = [] # Empty list is fine for this test # Patch all the type checks to make testing simpler - with patch("agentops.instrumentation.openai.attributes.response.ResponseOutputMessage", MockOutputMessage): - with patch("agentops.instrumentation.openai.attributes.response.ResponseOutputText", MockOutputText): + with patch( + "agentops.instrumentation.providers.openai.attributes.response.ResponseOutputMessage", MockOutputMessage + ): + with patch( + "agentops.instrumentation.providers.openai.attributes.response.ResponseOutputText", MockOutputText + ): with patch( - "agentops.instrumentation.openai.attributes.response.ResponseFunctionToolCall", MockFunctionToolCall + "agentops.instrumentation.providers.openai.attributes.response.ResponseFunctionToolCall", + MockFunctionToolCall, ): result = get_response_output_attributes(output) @@ -373,7 +382,7 @@ def test_get_response_output_message_attributes(self): # and can be called without exception # Patch the ResponseOutputText class to make testing simpler - with patch("agentops.instrumentation.openai.attributes.response.ResponseOutputText", MockOutputText): + with 
patch("agentops.instrumentation.providers.openai.attributes.response.ResponseOutputText", MockOutputText): # Create a minimal mock with required attributes message = MockOutputMessage( { @@ -415,7 +424,7 @@ def test_get_response_output_text_attributes(self): # We'll test by using patch to simulate the extraction with patch( - "agentops.instrumentation.openai.attributes.response._extract_attributes_from_mapping_with_index" + "agentops.instrumentation.providers.openai.attributes.response._extract_attributes_from_mapping_with_index" ) as mock_extract: # Set up the mock to return expected attributes expected_attributes = { @@ -489,18 +498,30 @@ def test_get_response_output_attributes_comprehensive(self): # Patch all the necessary type checks and logger with ( - patch("agentops.instrumentation.openai.attributes.response.ResponseOutputMessage", MockOutputMessage), - patch("agentops.instrumentation.openai.attributes.response.ResponseOutputText", MockOutputText), - patch("agentops.instrumentation.openai.attributes.response.ResponseFunctionToolCall", MockFunctionToolCall), patch( - "agentops.instrumentation.openai.attributes.response.ResponseFunctionWebSearch", MockFunctionWebSearch + "agentops.instrumentation.providers.openai.attributes.response.ResponseOutputMessage", MockOutputMessage + ), + patch("agentops.instrumentation.providers.openai.attributes.response.ResponseOutputText", MockOutputText), + patch( + "agentops.instrumentation.providers.openai.attributes.response.ResponseFunctionToolCall", + MockFunctionToolCall, + ), + patch( + "agentops.instrumentation.providers.openai.attributes.response.ResponseFunctionWebSearch", + MockFunctionWebSearch, + ), + patch( + "agentops.instrumentation.providers.openai.attributes.response.ResponseFileSearchToolCall", + MockFileSearchToolCall, ), patch( - "agentops.instrumentation.openai.attributes.response.ResponseFileSearchToolCall", MockFileSearchToolCall + "agentops.instrumentation.providers.openai.attributes.response.ResponseComputerToolCall", + MockComputerToolCall, ), - patch("agentops.instrumentation.openai.attributes.response.ResponseComputerToolCall", MockComputerToolCall), - patch("agentops.instrumentation.openai.attributes.response.ResponseReasoningItem", MockReasoningItem), - patch("agentops.instrumentation.openai.attributes.response.logger.debug") as mock_logger, + patch( + "agentops.instrumentation.providers.openai.attributes.response.ResponseReasoningItem", MockReasoningItem + ), + patch("agentops.instrumentation.providers.openai.attributes.response.logger.debug") as mock_logger, ): # Test with an output list containing all different types of output items output = [message, tool_call, web_search, file_search, computer_call, reasoning_item, unrecognized_item] @@ -547,10 +568,10 @@ def test_get_response_tools_attributes(self): ) # Patch all tool types to make testing simpler - with patch("agentops.instrumentation.openai.attributes.response.FunctionTool", MockFunctionTool): - with patch("agentops.instrumentation.openai.attributes.response.WebSearchTool", MagicMock): - with patch("agentops.instrumentation.openai.attributes.response.FileSearchTool", MagicMock): - with patch("agentops.instrumentation.openai.attributes.response.ComputerTool", MagicMock): + with patch("agentops.instrumentation.providers.openai.attributes.response.FunctionTool", MockFunctionTool): + with patch("agentops.instrumentation.providers.openai.attributes.response.WebSearchTool", MagicMock): + with 
patch("agentops.instrumentation.providers.openai.attributes.response.FileSearchTool", MagicMock): + with patch("agentops.instrumentation.providers.openai.attributes.response.ComputerTool", MagicMock): # Test with a function tool tools = [function_tool] @@ -579,7 +600,7 @@ def test_get_response_tool_web_search_attributes(self): ) # Call the function directly - with patch("agentops.instrumentation.openai.attributes.response.WebSearchTool", MockWebSearchTool): + with patch("agentops.instrumentation.providers.openai.attributes.response.WebSearchTool", MockWebSearchTool): result = get_response_tool_web_search_attributes(web_search_tool, 0) # Verify attributes @@ -609,7 +630,7 @@ def test_get_response_tool_file_search_attributes(self): ) # Call the function directly - with patch("agentops.instrumentation.openai.attributes.response.FileSearchTool", MockFileSearchTool): + with patch("agentops.instrumentation.providers.openai.attributes.response.FileSearchTool", MockFileSearchTool): result = get_response_tool_file_search_attributes(file_search_tool, 0) # Verify attributes @@ -631,7 +652,7 @@ def test_get_response_tool_computer_attributes(self): ) # Call the function directly - with patch("agentops.instrumentation.openai.attributes.response.ComputerTool", MockComputerTool): + with patch("agentops.instrumentation.providers.openai.attributes.response.ComputerTool", MockComputerTool): result = get_response_tool_computer_attributes(computer_tool, 0) # Verify attributes @@ -649,8 +670,10 @@ def test_get_response_usage_attributes(self): # Create a more comprehensive test for usage attributes # Patch the OutputTokensDetails class to make testing simpler - with patch("agentops.instrumentation.openai.attributes.response.OutputTokensDetails", MockOutputTokensDetails): - with patch("agentops.instrumentation.openai.attributes.response.InputTokensDetails", MagicMock): + with patch( + "agentops.instrumentation.providers.openai.attributes.response.OutputTokensDetails", MockOutputTokensDetails + ): + with patch("agentops.instrumentation.providers.openai.attributes.response.InputTokensDetails", MagicMock): # Test with all fields usage = MockResponseUsage( { From 369df8b33bf61a6ab8babd6cebdf196bdf84e989 Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Wed, 18 Jun 2025 15:38:38 +0530 Subject: [PATCH 4/8] refactor: rename variable and improve manager agent initialization --- examples/smolagents/multi_smolagents_system.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/examples/smolagents/multi_smolagents_system.py b/examples/smolagents/multi_smolagents_system.py index fe2c7a254..c14740b30 100644 --- a/examples/smolagents/multi_smolagents_system.py +++ b/examples/smolagents/multi_smolagents_system.py @@ -96,10 +96,8 @@ def visit_webpage(url: str) -> str: tools=[DuckDuckGoSearchTool(), visit_webpage], model=model, ) -abc = "Manager Agent" + manager_agent = CodeAgent( - name=abc, - description="This agent manages the web search agent and coordinates its tasks.", tools=[], model=model, managed_agents=[web_agent], From bbec7413312192a17194ab1d1b875efabe86f948 Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Wed, 18 Jun 2025 15:42:35 +0530 Subject: [PATCH 5/8] refactor: enhance error handling and logging for agent and model wrapping --- .../agentic/smolagents/instrumentor.py | 106 +++++++++++++----- 1 file changed, 78 insertions(+), 28 deletions(-) diff --git a/agentops/instrumentation/agentic/smolagents/instrumentor.py b/agentops/instrumentation/agentic/smolagents/instrumentor.py index 
d19323b72..ff5311316 100644 --- a/agentops/instrumentation/agentic/smolagents/instrumentor.py +++ b/agentops/instrumentation/agentic/smolagents/instrumentor.py @@ -83,18 +83,49 @@ def _custom_wrap(self, **kwargs): This is called after normal wrapping, but we use it for all wrapping since we don't have normal wrapped methods. """ - # Core agent operations - wrap_function_wrapper("smolagents.agents", "CodeAgent.run", self._agent_run_wrapper(self._tracer)) - wrap_function_wrapper("smolagents.agents", "ToolCallingAgent.run", self._agent_run_wrapper(self._tracer)) - - # Tool calling operations - wrap_function_wrapper( - "smolagents.agents", "ToolCallingAgent.execute_tool_call", self._tool_execution_wrapper(self._tracer) - ) + # Core agent operations - wrap with error handling + try: + # Import the module first to ensure it's loaded + import smolagents.agents + + # Check if classes exist before wrapping + if hasattr(smolagents.agents, "CodeAgent"): + wrap_function_wrapper("smolagents.agents", "CodeAgent.run", self._agent_run_wrapper(self._tracer)) + logger.debug("Successfully wrapped CodeAgent.run") + else: + logger.debug("CodeAgent not found in smolagents.agents") + + if hasattr(smolagents.agents, "ToolCallingAgent"): + wrap_function_wrapper( + "smolagents.agents", "ToolCallingAgent.run", self._agent_run_wrapper(self._tracer) + ) + wrap_function_wrapper( + "smolagents.agents", + "ToolCallingAgent.execute_tool_call", + self._tool_execution_wrapper(self._tracer), + ) + logger.debug("Successfully wrapped ToolCallingAgent methods") + else: + logger.debug("ToolCallingAgent not found in smolagents.agents") + except (ImportError, AttributeError) as e: + logger.debug(f"Failed to wrap agent operations: {e}") # Model operations with proper model name extraction - wrap_function_wrapper("smolagents.models", "LiteLLMModel.generate", self._llm_wrapper(self._tracer)) - wrap_function_wrapper("smolagents.models", "LiteLLMModel.generate_stream", self._llm_wrapper(self._tracer)) + try: + # Import the module first to ensure it's loaded + import smolagents.models + + # Check if class exists before wrapping + if hasattr(smolagents.models, "LiteLLMModel"): + wrap_function_wrapper("smolagents.models", "LiteLLMModel.generate", self._llm_wrapper(self._tracer)) + wrap_function_wrapper( + "smolagents.models", "LiteLLMModel.generate_stream", self._llm_wrapper(self._tracer) + ) + logger.debug("Successfully wrapped LiteLLMModel methods") + else: + logger.debug("LiteLLMModel not found in smolagents.models") + except (ImportError, AttributeError) as e: + logger.debug(f"Failed to wrap model operations: {e}") logger.info("SmoLAgents instrumentation enabled") @@ -258,29 +289,48 @@ def _custom_unwrap(self, **kwargs): # Unwrap all instrumented methods from opentelemetry.instrumentation.utils import unwrap + # Try to import modules before attempting unwrap try: - unwrap("smolagents.agents", "CodeAgent.run") - except Exception as e: - logger.debug(f"Failed to unwrap CodeAgent.run: {e}") + import smolagents.agents - try: - unwrap("smolagents.agents", "ToolCallingAgent.run") - except Exception as e: - logger.debug(f"Failed to unwrap ToolCallingAgent.run: {e}") + if hasattr(smolagents.agents, "CodeAgent"): + try: + unwrap("smolagents.agents", "CodeAgent.run") + logger.debug("Successfully unwrapped CodeAgent.run") + except Exception as e: + logger.debug(f"Failed to unwrap CodeAgent.run: {e}") - try: - unwrap("smolagents.agents", "ToolCallingAgent.execute_tool_call") - except Exception as e: - logger.debug(f"Failed to unwrap 
ToolCallingAgent.execute_tool_call: {e}") + if hasattr(smolagents.agents, "ToolCallingAgent"): + try: + unwrap("smolagents.agents", "ToolCallingAgent.run") + logger.debug("Successfully unwrapped ToolCallingAgent.run") + except Exception as e: + logger.debug(f"Failed to unwrap ToolCallingAgent.run: {e}") - try: - unwrap("smolagents.models", "LiteLLMModel.generate") - except Exception as e: - logger.debug(f"Failed to unwrap LiteLLMModel.generate: {e}") + try: + unwrap("smolagents.agents", "ToolCallingAgent.execute_tool_call") + logger.debug("Successfully unwrapped ToolCallingAgent.execute_tool_call") + except Exception as e: + logger.debug(f"Failed to unwrap ToolCallingAgent.execute_tool_call: {e}") + except ImportError as e: + logger.debug(f"smolagents.agents module not available for unwrapping: {e}") try: - unwrap("smolagents.models", "LiteLLMModel.generate_stream") - except Exception as e: - logger.debug(f"Failed to unwrap LiteLLMModel.generate_stream: {e}") + import smolagents.models + + if hasattr(smolagents.models, "LiteLLMModel"): + try: + unwrap("smolagents.models", "LiteLLMModel.generate") + logger.debug("Successfully unwrapped LiteLLMModel.generate") + except Exception as e: + logger.debug(f"Failed to unwrap LiteLLMModel.generate: {e}") + + try: + unwrap("smolagents.models", "LiteLLMModel.generate_stream") + logger.debug("Successfully unwrapped LiteLLMModel.generate_stream") + except Exception as e: + logger.debug(f"Failed to unwrap LiteLLMModel.generate_stream: {e}") + except ImportError as e: + logger.debug(f"smolagents.models module not available for unwrapping: {e}") logger.info("SmoLAgents instrumentation disabled") From c4005ec4d76d2b55facf9b829468a5be7d49fcb6 Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Wed, 18 Jun 2025 16:03:07 +0530 Subject: [PATCH 6/8] refactor: streamline agent and model wrapping with improved error handling --- .../agentic/smolagents/instrumentor.py | 106 +++++------------- 1 file changed, 28 insertions(+), 78 deletions(-) diff --git a/agentops/instrumentation/agentic/smolagents/instrumentor.py b/agentops/instrumentation/agentic/smolagents/instrumentor.py index ff5311316..d19323b72 100644 --- a/agentops/instrumentation/agentic/smolagents/instrumentor.py +++ b/agentops/instrumentation/agentic/smolagents/instrumentor.py @@ -83,49 +83,18 @@ def _custom_wrap(self, **kwargs): This is called after normal wrapping, but we use it for all wrapping since we don't have normal wrapped methods. 
""" - # Core agent operations - wrap with error handling - try: - # Import the module first to ensure it's loaded - import smolagents.agents - - # Check if classes exist before wrapping - if hasattr(smolagents.agents, "CodeAgent"): - wrap_function_wrapper("smolagents.agents", "CodeAgent.run", self._agent_run_wrapper(self._tracer)) - logger.debug("Successfully wrapped CodeAgent.run") - else: - logger.debug("CodeAgent not found in smolagents.agents") - - if hasattr(smolagents.agents, "ToolCallingAgent"): - wrap_function_wrapper( - "smolagents.agents", "ToolCallingAgent.run", self._agent_run_wrapper(self._tracer) - ) - wrap_function_wrapper( - "smolagents.agents", - "ToolCallingAgent.execute_tool_call", - self._tool_execution_wrapper(self._tracer), - ) - logger.debug("Successfully wrapped ToolCallingAgent methods") - else: - logger.debug("ToolCallingAgent not found in smolagents.agents") - except (ImportError, AttributeError) as e: - logger.debug(f"Failed to wrap agent operations: {e}") + # Core agent operations + wrap_function_wrapper("smolagents.agents", "CodeAgent.run", self._agent_run_wrapper(self._tracer)) + wrap_function_wrapper("smolagents.agents", "ToolCallingAgent.run", self._agent_run_wrapper(self._tracer)) + + # Tool calling operations + wrap_function_wrapper( + "smolagents.agents", "ToolCallingAgent.execute_tool_call", self._tool_execution_wrapper(self._tracer) + ) # Model operations with proper model name extraction - try: - # Import the module first to ensure it's loaded - import smolagents.models - - # Check if class exists before wrapping - if hasattr(smolagents.models, "LiteLLMModel"): - wrap_function_wrapper("smolagents.models", "LiteLLMModel.generate", self._llm_wrapper(self._tracer)) - wrap_function_wrapper( - "smolagents.models", "LiteLLMModel.generate_stream", self._llm_wrapper(self._tracer) - ) - logger.debug("Successfully wrapped LiteLLMModel methods") - else: - logger.debug("LiteLLMModel not found in smolagents.models") - except (ImportError, AttributeError) as e: - logger.debug(f"Failed to wrap model operations: {e}") + wrap_function_wrapper("smolagents.models", "LiteLLMModel.generate", self._llm_wrapper(self._tracer)) + wrap_function_wrapper("smolagents.models", "LiteLLMModel.generate_stream", self._llm_wrapper(self._tracer)) logger.info("SmoLAgents instrumentation enabled") @@ -289,48 +258,29 @@ def _custom_unwrap(self, **kwargs): # Unwrap all instrumented methods from opentelemetry.instrumentation.utils import unwrap - # Try to import modules before attempting unwrap try: - import smolagents.agents - - if hasattr(smolagents.agents, "CodeAgent"): - try: - unwrap("smolagents.agents", "CodeAgent.run") - logger.debug("Successfully unwrapped CodeAgent.run") - except Exception as e: - logger.debug(f"Failed to unwrap CodeAgent.run: {e}") - - if hasattr(smolagents.agents, "ToolCallingAgent"): - try: - unwrap("smolagents.agents", "ToolCallingAgent.run") - logger.debug("Successfully unwrapped ToolCallingAgent.run") - except Exception as e: - logger.debug(f"Failed to unwrap ToolCallingAgent.run: {e}") + unwrap("smolagents.agents", "CodeAgent.run") + except Exception as e: + logger.debug(f"Failed to unwrap CodeAgent.run: {e}") - try: - unwrap("smolagents.agents", "ToolCallingAgent.execute_tool_call") - logger.debug("Successfully unwrapped ToolCallingAgent.execute_tool_call") - except Exception as e: - logger.debug(f"Failed to unwrap ToolCallingAgent.execute_tool_call: {e}") - except ImportError as e: - logger.debug(f"smolagents.agents module not available for unwrapping: 
{e}") + try: + unwrap("smolagents.agents", "ToolCallingAgent.run") + except Exception as e: + logger.debug(f"Failed to unwrap ToolCallingAgent.run: {e}") try: - import smolagents.models + unwrap("smolagents.agents", "ToolCallingAgent.execute_tool_call") + except Exception as e: + logger.debug(f"Failed to unwrap ToolCallingAgent.execute_tool_call: {e}") - if hasattr(smolagents.models, "LiteLLMModel"): - try: - unwrap("smolagents.models", "LiteLLMModel.generate") - logger.debug("Successfully unwrapped LiteLLMModel.generate") - except Exception as e: - logger.debug(f"Failed to unwrap LiteLLMModel.generate: {e}") + try: + unwrap("smolagents.models", "LiteLLMModel.generate") + except Exception as e: + logger.debug(f"Failed to unwrap LiteLLMModel.generate: {e}") - try: - unwrap("smolagents.models", "LiteLLMModel.generate_stream") - logger.debug("Successfully unwrapped LiteLLMModel.generate_stream") - except Exception as e: - logger.debug(f"Failed to unwrap LiteLLMModel.generate_stream: {e}") - except ImportError as e: - logger.debug(f"smolagents.models module not available for unwrapping: {e}") + try: + unwrap("smolagents.models", "LiteLLMModel.generate_stream") + except Exception as e: + logger.debug(f"Failed to unwrap LiteLLMModel.generate_stream: {e}") logger.info("SmoLAgents instrumentation disabled") From e61cbc1e688b9fc5dfa221fef226e533ae7eeba3 Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Wed, 18 Jun 2025 16:05:51 +0530 Subject: [PATCH 7/8] refactor: remove duplicate import statement for smolagents in multi_smolagents_system notebook --- examples/smolagents/multi_smolagents_system.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/smolagents/multi_smolagents_system.ipynb b/examples/smolagents/multi_smolagents_system.ipynb index 9a97f154e..f87046192 100644 --- a/examples/smolagents/multi_smolagents_system.ipynb +++ b/examples/smolagents/multi_smolagents_system.ipynb @@ -95,11 +95,11 @@ "metadata": {}, "outputs": [], "source": [ + "from smolagents import LiteLLMModel, tool ,CodeAgent, ToolCallingAgent, DuckDuckGoSearchTool\n", "agentops.init(auto_start_session=False)\n", "tracer = agentops.start_trace(\n", " trace_name=\"Orchestrate a Multi-Agent System\", tags=[\"smolagents\", \"example\", \"multi-agent\", \"agentops-example\"]\n", ")\n", - "from smolagents import LiteLLMModel, tool ,CodeAgent, ToolCallingAgent, DuckDuckGoSearchTool\n", "model = LiteLLMModel(\"openai/gpt-4o-mini\")" ] }, From a92c07eff79980cc1a72c563d26ba208cd4e6abf Mon Sep 17 00:00:00 2001 From: Dwij Patel Date: Thu, 19 Jun 2025 03:33:42 +0530 Subject: [PATCH 8/8] refactor: rename BaseAgentOpsInstrumentor to CommonInstrumentor and update references across the codebase --- agentops/instrumentation/README.md | 10 +++++----- agentops/instrumentation/__init__.py | 12 ++++++------ .../instrumentation/agentic/ag2/instrumentor.py | 4 ++-- .../instrumentation/agentic/agno/instrumentor.py | 4 ++-- .../instrumentation/agentic/crewai/__init__.py | 4 ++-- .../agentic/crewai/instrumentation.py | 4 ++-- .../agentic/google_adk/__init__.py | 4 ++-- .../agentic/google_adk/instrumentor.py | 4 ++-- .../agentic/openai_agents/attributes/common.py | 2 +- .../instrumentation/agentic/smolagents/README.md | 2 +- .../agentic/smolagents/__init__.py | 4 ++-- .../agentic/smolagents/instrumentor.py | 4 ++-- agentops/instrumentation/common/__init__.py | 4 ++-- agentops/instrumentation/common/instrumentor.py | 2 +- .../providers/anthropic/instrumentor.py | 4 ++-- .../providers/google_genai/__init__.py | 4 ++-- 
.../providers/google_genai/instrumentor.py | 4 ++-- .../providers/ibm_watsonx_ai/__init__.py | 4 ++-- .../providers/ibm_watsonx_ai/instrumentor.py | 4 ++-- .../providers/mem0/instrumentor.py | 4 ++-- .../instrumentation/providers/openai/__init__.py | 4 ++-- .../providers/openai/instrumentor.py | 4 ++-- .../concurrent_futures/instrumentation.py | 4 ++-- .../openai_core/test_instrumentor.py | 16 ++++++++-------- 24 files changed, 58 insertions(+), 58 deletions(-) diff --git a/agentops/instrumentation/README.md b/agentops/instrumentation/README.md index a97b4649c..2b7e21ed9 100644 --- a/agentops/instrumentation/README.md +++ b/agentops/instrumentation/README.md @@ -13,7 +13,7 @@ This package provides OpenTelemetry instrumentation for various LLM providers an - **Google ADK** (`v0.1.0+`) - **Agno** (`v0.0.1+`) - **Mem0** (`v0.1.0+`) -- **SmolAgents** (`v0.1.0+`) +- **smolagents** (`v0.1.0+`) ## Common Module Usage @@ -21,12 +21,12 @@ The `agentops.instrumentation.common` module provides shared utilities for creat ### Base Instrumentor -Use `BaseAgentOpsInstrumentor` for creating new instrumentations: +Use `CommonInstrumentor` for creating new instrumentations: ```python -from agentops.instrumentation.common import BaseAgentOpsInstrumentor, InstrumentorConfig, WrapConfig +from agentops.instrumentation.common import CommonInstrumentor, InstrumentorConfig, WrapConfig -class MyInstrumentor(BaseAgentOpsInstrumentor): +class MyInstrumentor(CommonInstrumentor): def __init__(self): config = InstrumentorConfig( library_name="my-library", @@ -138,7 +138,7 @@ recorder.record_duration(1.5) 1. Create a new directory under `agentops/instrumentation/` for your provider 2. Create an `__init__.py` file with version information -3. Create an `instrumentor.py` file extending `BaseAgentOpsInstrumentor` +3. Create an `instrumentor.py` file extending `CommonInstrumentor` 4. Create attribute handlers in an `attributes/` subdirectory 5. 
Add your instrumentor to the main `__init__.py` configuration diff --git a/agentops/instrumentation/__init__.py b/agentops/instrumentation/__init__.py index 54ae6ee5e..2df5c7a9a 100644 --- a/agentops/instrumentation/__init__.py +++ b/agentops/instrumentation/__init__.py @@ -48,7 +48,7 @@ class InstrumentorConfig(TypedDict): PROVIDERS: dict[str, InstrumentorConfig] = { "openai": { "module_name": "agentops.instrumentation.providers.openai", - "class_name": "OpenAIInstrumentor", + "class_name": "OpenaiInstrumentor", "min_version": "1.0.0", }, "anthropic": { @@ -58,12 +58,12 @@ class InstrumentorConfig(TypedDict): }, "ibm_watsonx_ai": { "module_name": "agentops.instrumentation.providers.ibm_watsonx_ai", - "class_name": "IBMWatsonXInstrumentor", + "class_name": "WatsonxInstrumentor", "min_version": "0.1.0", }, "google.genai": { "module_name": "agentops.instrumentation.providers.google_genai", - "class_name": "GoogleGenAIInstrumentor", + "class_name": "GoogleGenaiInstrumentor", "min_version": "0.1.0", "package_name": "google-genai", # Actual pip package name }, @@ -89,7 +89,7 @@ class InstrumentorConfig(TypedDict): AGENTIC_LIBRARIES: dict[str, InstrumentorConfig] = { "crewai": { "module_name": "agentops.instrumentation.agentic.crewai", - "class_name": "CrewAIInstrumentor", + "class_name": "CrewaiInstrumentor", "min_version": "0.56.0", }, "autogen": { @@ -104,7 +104,7 @@ class InstrumentorConfig(TypedDict): }, "google.adk": { "module_name": "agentops.instrumentation.agentic.google_adk", - "class_name": "GoogleADKInstrumentor", + "class_name": "GooogleAdkInstrumentor", "min_version": "0.1.0", }, "agno": { @@ -114,7 +114,7 @@ class InstrumentorConfig(TypedDict): }, "smolagents": { "module_name": "agentops.instrumentation.agentic.smolagents", - "class_name": "SmolAgentsInstrumentor", + "class_name": "SmolagentsInstrumentor", "min_version": "1.0.0", }, } diff --git a/agentops/instrumentation/agentic/ag2/instrumentor.py b/agentops/instrumentation/agentic/ag2/instrumentor.py index 8a6558970..ebb9bf594 100644 --- a/agentops/instrumentation/agentic/ag2/instrumentor.py +++ b/agentops/instrumentation/agentic/ag2/instrumentor.py @@ -14,7 +14,7 @@ from agentops.logging import logger from agentops.instrumentation.common import ( - BaseAgentOpsInstrumentor, + CommonInstrumentor, InstrumentorConfig, StandardMetrics, create_span, @@ -28,7 +28,7 @@ from agentops.semconv.tool import ToolAttributes -class AG2Instrumentor(BaseAgentOpsInstrumentor): +class AG2Instrumentor(CommonInstrumentor): """Instrumentor for AG2 (AutoGen) This instrumentor captures high-level events from AG2's agent interactions, diff --git a/agentops/instrumentation/agentic/agno/instrumentor.py b/agentops/instrumentation/agentic/agno/instrumentor.py index 3576a6bdb..1f5fab321 100644 --- a/agentops/instrumentation/agentic/agno/instrumentor.py +++ b/agentops/instrumentation/agentic/agno/instrumentor.py @@ -25,7 +25,7 @@ from agentops.logging import logger from agentops.instrumentation.common import ( - BaseAgentOpsInstrumentor, + CommonInstrumentor, StandardMetrics, InstrumentorConfig, ) @@ -912,7 +912,7 @@ def get_agent_context_for_llm(): return None, None -class AgnoInstrumentor(BaseAgentOpsInstrumentor): +class AgnoInstrumentor(CommonInstrumentor): """Agno instrumentation class.""" def __init__(self): diff --git a/agentops/instrumentation/agentic/crewai/__init__.py b/agentops/instrumentation/agentic/crewai/__init__.py index 692478d0e..102c40743 100644 --- a/agentops/instrumentation/agentic/crewai/__init__.py +++ 
b/agentops/instrumentation/agentic/crewai/__init__.py @@ -1,6 +1,6 @@ """OpenTelemetry CrewAI instrumentation""" from agentops.instrumentation.agentic.crewai.version import __version__ -from agentops.instrumentation.agentic.crewai.instrumentation import CrewAIInstrumentor +from agentops.instrumentation.agentic.crewai.instrumentation import CrewaiInstrumentor -__all__ = ["CrewAIInstrumentor", "__version__"] +__all__ = ["CrewaiInstrumentor", "__version__"] diff --git a/agentops/instrumentation/agentic/crewai/instrumentation.py b/agentops/instrumentation/agentic/crewai/instrumentation.py index 0f2102d61..421412403 100644 --- a/agentops/instrumentation/agentic/crewai/instrumentation.py +++ b/agentops/instrumentation/agentic/crewai/instrumentation.py @@ -9,7 +9,7 @@ from opentelemetry.instrumentation.utils import unwrap from agentops.instrumentation.common import ( - BaseAgentOpsInstrumentor, + CommonInstrumentor, InstrumentorConfig, StandardMetrics, create_wrapper_factory, @@ -68,7 +68,7 @@ def attach_tool_executions_to_agent_span(span): del _tool_executions_by_agent[span_id] -class CrewAIInstrumentor(BaseAgentOpsInstrumentor): +class CrewaiInstrumentor(CommonInstrumentor): """Instrumentor for CrewAI framework.""" def __init__(self): diff --git a/agentops/instrumentation/agentic/google_adk/__init__.py b/agentops/instrumentation/agentic/google_adk/__init__.py index 8dda30547..7c3d64977 100644 --- a/agentops/instrumentation/agentic/google_adk/__init__.py +++ b/agentops/instrumentation/agentic/google_adk/__init__.py @@ -13,7 +13,7 @@ LIBRARY_NAME = _library_info.name LIBRARY_VERSION = _library_info.version -from agentops.instrumentation.agentic.google_adk.instrumentor import GoogleADKInstrumentor # noqa: E402 +from agentops.instrumentation.agentic.google_adk.instrumentor import GooogleAdkInstrumentor # noqa: E402 from agentops.instrumentation.agentic.google_adk import patch # noqa: E402 -__all__ = ["LIBRARY_NAME", "LIBRARY_VERSION", "GoogleADKInstrumentor", "patch"] +__all__ = ["LIBRARY_NAME", "LIBRARY_VERSION", "GooogleAdkInstrumentor", "patch"] diff --git a/agentops/instrumentation/agentic/google_adk/instrumentor.py b/agentops/instrumentation/agentic/google_adk/instrumentor.py index a14e17d6a..c7e054065 100644 --- a/agentops/instrumentation/agentic/google_adk/instrumentor.py +++ b/agentops/instrumentation/agentic/google_adk/instrumentor.py @@ -11,7 +11,7 @@ from agentops.logging import logger from opentelemetry.metrics import Meter -from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics, InstrumentorConfig +from agentops.instrumentation.common import CommonInstrumentor, StandardMetrics, InstrumentorConfig from agentops.instrumentation.agentic.google_adk.patch import patch_adk, unpatch_adk # Library info for tracer/meter @@ -19,7 +19,7 @@ LIBRARY_VERSION = "0.1.0" -class GoogleADKInstrumentor(BaseAgentOpsInstrumentor): +class GooogleAdkInstrumentor(CommonInstrumentor): """An instrumentor for Google Agent Development Kit (ADK). 
This instrumentor patches Google ADK to: diff --git a/agentops/instrumentation/agentic/openai_agents/attributes/common.py b/agentops/instrumentation/agentic/openai_agents/attributes/common.py index 18ebe8997..154055db1 100644 --- a/agentops/instrumentation/agentic/openai_agents/attributes/common.py +++ b/agentops/instrumentation/agentic/openai_agents/attributes/common.py @@ -37,7 +37,7 @@ AgentAttributes.AGENT_TOOLS: "tools", AgentAttributes.HANDOFFS: "handoffs", WorkflowAttributes.WORKFLOW_INPUT: "input", - WorkflowAttributes.WORKFLOW_FINAL_OUTPUT: "output", + WorkflowAttributes.WORKFLOW_OUTPUT: "output", } diff --git a/agentops/instrumentation/agentic/smolagents/README.md b/agentops/instrumentation/agentic/smolagents/README.md index 20d63ff37..d89383f33 100644 --- a/agentops/instrumentation/agentic/smolagents/README.md +++ b/agentops/instrumentation/agentic/smolagents/README.md @@ -26,7 +26,7 @@ This module provides OpenTelemetry instrumentation for the SmoLAgents framework. ```python from agentops import init -from agentops.instrumentation.smolagents import SmoLAgentsInstrumentor +from agentops.instrumentation.smolagents import SmolagentsInstrumentor # Initialize AgentOps with your API key init(api_key="your-api-key") diff --git a/agentops/instrumentation/agentic/smolagents/__init__.py b/agentops/instrumentation/agentic/smolagents/__init__.py index 7a8ede8df..1302ff089 100644 --- a/agentops/instrumentation/agentic/smolagents/__init__.py +++ b/agentops/instrumentation/agentic/smolagents/__init__.py @@ -7,6 +7,6 @@ LIBRARY_NAME = _library_info.name LIBRARY_VERSION = _library_info.version -from agentops.instrumentation.agentic.smolagents.instrumentor import SmolAgentsInstrumentor # noqa: E402 +from agentops.instrumentation.agentic.smolagents.instrumentor import SmolagentsInstrumentor # noqa: E402 -__all__ = ["SmolAgentsInstrumentor"] +__all__ = ["SmolagentsInstrumentor"] diff --git a/agentops/instrumentation/agentic/smolagents/instrumentor.py b/agentops/instrumentation/agentic/smolagents/instrumentor.py index d19323b72..ff185edfc 100644 --- a/agentops/instrumentation/agentic/smolagents/instrumentor.py +++ b/agentops/instrumentation/agentic/smolagents/instrumentor.py @@ -5,7 +5,7 @@ from opentelemetry.metrics import Meter from wrapt import wrap_function_wrapper -from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics, InstrumentorConfig +from agentops.instrumentation.common import CommonInstrumentor, StandardMetrics, InstrumentorConfig from agentops.logging import logger # Library info for tracer/meter @@ -53,7 +53,7 @@ def get_stream_attributes(*args, **kwargs): return {} -class SmolAgentsInstrumentor(BaseAgentOpsInstrumentor): +class SmolagentsInstrumentor(CommonInstrumentor): """Instrumentor for SmoLAgents library.""" def __init__(self): diff --git a/agentops/instrumentation/common/__init__.py b/agentops/instrumentation/common/__init__.py index 81bd8f162..b8779aff7 100644 --- a/agentops/instrumentation/common/__init__.py +++ b/agentops/instrumentation/common/__init__.py @@ -8,7 +8,7 @@ from agentops.instrumentation.common.wrappers import _with_tracer_wrapper, WrapConfig, wrap, unwrap from agentops.instrumentation.common.instrumentor import ( InstrumentorConfig, - BaseAgentOpsInstrumentor, + CommonInstrumentor, create_wrapper_factory, ) from agentops.instrumentation.common.metrics import StandardMetrics, MetricsRecorder @@ -51,7 +51,7 @@ "unwrap", # Instrumentor "InstrumentorConfig", - "BaseAgentOpsInstrumentor", + "CommonInstrumentor", 
"create_wrapper_factory", # Metrics "StandardMetrics", diff --git a/agentops/instrumentation/common/instrumentor.py b/agentops/instrumentation/common/instrumentor.py index 5011a2a5a..03bb4f608 100644 --- a/agentops/instrumentation/common/instrumentor.py +++ b/agentops/instrumentation/common/instrumentor.py @@ -27,7 +27,7 @@ class InstrumentorConfig: dependencies: Collection[str] = field(default_factory=list) -class BaseAgentOpsInstrumentor(BaseInstrumentor, ABC): +class CommonInstrumentor(BaseInstrumentor, ABC): """Base class for AgentOps instrumentors with common functionality.""" def __init__(self, config: InstrumentorConfig): diff --git a/agentops/instrumentation/providers/anthropic/instrumentor.py b/agentops/instrumentation/providers/anthropic/instrumentor.py index 75ed1b4bb..7e4d5a73c 100644 --- a/agentops/instrumentation/providers/anthropic/instrumentor.py +++ b/agentops/instrumentation/providers/anthropic/instrumentor.py @@ -32,7 +32,7 @@ from wrapt import wrap_function_wrapper from agentops.logging import logger -from agentops.instrumentation.common import BaseAgentOpsInstrumentor, InstrumentorConfig, WrapConfig, StandardMetrics +from agentops.instrumentation.common import CommonInstrumentor, InstrumentorConfig, WrapConfig, StandardMetrics from agentops.instrumentation.providers.anthropic import LIBRARY_NAME, LIBRARY_VERSION from agentops.instrumentation.providers.anthropic.attributes.message import ( get_message_attributes, @@ -46,7 +46,7 @@ from opentelemetry.instrumentation.utils import unwrap as otel_unwrap -class AnthropicInstrumentor(BaseAgentOpsInstrumentor): +class AnthropicInstrumentor(CommonInstrumentor): """An instrumentor for Anthropic's Claude API. This class provides instrumentation for Anthropic's Claude API by wrapping key methods diff --git a/agentops/instrumentation/providers/google_genai/__init__.py b/agentops/instrumentation/providers/google_genai/__init__.py index 0b82d64b1..d08dd4b24 100644 --- a/agentops/instrumentation/providers/google_genai/__init__.py +++ b/agentops/instrumentation/providers/google_genai/__init__.py @@ -15,10 +15,10 @@ LIBRARY_VERSION = _library_info.version # Import after defining constants to avoid circular imports -from agentops.instrumentation.providers.google_genai.instrumentor import GoogleGenAIInstrumentor # noqa: E402 +from agentops.instrumentation.providers.google_genai.instrumentor import GoogleGenaiInstrumentor # noqa: E402 __all__ = [ "LIBRARY_NAME", "LIBRARY_VERSION", - "GoogleGenAIInstrumentor", + "GoogleGenaiInstrumentor", ] diff --git a/agentops/instrumentation/providers/google_genai/instrumentor.py b/agentops/instrumentation/providers/google_genai/instrumentor.py index 282729b98..32401a785 100644 --- a/agentops/instrumentation/providers/google_genai/instrumentor.py +++ b/agentops/instrumentation/providers/google_genai/instrumentor.py @@ -13,7 +13,7 @@ from opentelemetry.metrics import Meter from agentops.logging import logger -from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics, InstrumentorConfig +from agentops.instrumentation.common import CommonInstrumentor, StandardMetrics, InstrumentorConfig from agentops.instrumentation.common.wrappers import WrapConfig from agentops.instrumentation.providers.google_genai.attributes.model import ( get_generate_content_attributes, @@ -97,7 +97,7 @@ ] -class GoogleGenAIInstrumentor(BaseAgentOpsInstrumentor): +class GoogleGenaiInstrumentor(CommonInstrumentor): """An instrumentor for Google Generative AI (Gemini) API. 
This class provides instrumentation for Google's Generative AI API by wrapping key methods diff --git a/agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py b/agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py index e590ebff9..0717d4cbe 100644 --- a/agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py +++ b/agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py @@ -19,10 +19,10 @@ LIBRARY_VERSION = _library_info.version # Import after defining constants to avoid circular imports -from agentops.instrumentation.providers.ibm_watsonx_ai.instrumentor import IBMWatsonXInstrumentor # noqa: E402 +from agentops.instrumentation.providers.ibm_watsonx_ai.instrumentor import WatsonxInstrumentor # noqa: E402 __all__ = [ "LIBRARY_NAME", "LIBRARY_VERSION", - "IBMWatsonXInstrumentor", + "WatsonxInstrumentor", ] diff --git a/agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py b/agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py index dd5ec27d4..578827eda 100644 --- a/agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py +++ b/agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py @@ -17,7 +17,7 @@ from opentelemetry.metrics import Meter from agentops.logging import logger -from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics, InstrumentorConfig +from agentops.instrumentation.common import CommonInstrumentor, StandardMetrics, InstrumentorConfig from agentops.instrumentation.common.wrappers import WrapConfig from agentops.instrumentation.providers.ibm_watsonx_ai.attributes.attributes import ( get_generate_attributes, @@ -81,7 +81,7 @@ ] -class IBMWatsonXInstrumentor(BaseAgentOpsInstrumentor): +class WatsonxInstrumentor(CommonInstrumentor): """An instrumentor for IBM watsonx.ai API.""" def __init__(self): diff --git a/agentops/instrumentation/providers/mem0/instrumentor.py b/agentops/instrumentation/providers/mem0/instrumentor.py index 124d84de1..bf191d5b6 100644 --- a/agentops/instrumentation/providers/mem0/instrumentor.py +++ b/agentops/instrumentation/providers/mem0/instrumentor.py @@ -2,7 +2,7 @@ from wrapt import wrap_function_wrapper from opentelemetry.metrics import Meter -from agentops.instrumentation.common import BaseAgentOpsInstrumentor, StandardMetrics, InstrumentorConfig +from agentops.instrumentation.common import CommonInstrumentor, StandardMetrics, InstrumentorConfig from agentops.logging import logger # Import from refactored structure @@ -180,7 +180,7 @@ ] -class Mem0Instrumentor(BaseAgentOpsInstrumentor): +class Mem0Instrumentor(CommonInstrumentor): """An instrumentor for Mem0's client library. 
This class provides instrumentation for Mem0's memory operations by wrapping key methods diff --git a/agentops/instrumentation/providers/openai/__init__.py b/agentops/instrumentation/providers/openai/__init__.py index fbe903e09..8f3c2cdcb 100644 --- a/agentops/instrumentation/providers/openai/__init__.py +++ b/agentops/instrumentation/providers/openai/__init__.py @@ -12,10 +12,10 @@ LIBRARY_VERSION = _library_info.version # Import after defining constants to avoid circular imports -from agentops.instrumentation.providers.openai.instrumentor import OpenAIInstrumentor # noqa: E402 +from agentops.instrumentation.providers.openai.instrumentor import OpenaiInstrumentor # noqa: E402 __all__ = [ "LIBRARY_NAME", "LIBRARY_VERSION", - "OpenAIInstrumentor", + "OpenaiInstrumentor", ] diff --git a/agentops/instrumentation/providers/openai/instrumentor.py b/agentops/instrumentation/providers/openai/instrumentor.py index 1efbe0802..37883bf7a 100644 --- a/agentops/instrumentation/providers/openai/instrumentor.py +++ b/agentops/instrumentation/providers/openai/instrumentor.py @@ -15,7 +15,7 @@ from typing import Dict, Any from agentops.instrumentation.common import ( - BaseAgentOpsInstrumentor, + CommonInstrumentor, InstrumentorConfig, WrapConfig, StandardMetrics, @@ -43,7 +43,7 @@ _instruments = ("openai >= 0.27.0",) -class OpenAIInstrumentor(BaseAgentOpsInstrumentor): +class OpenaiInstrumentor(CommonInstrumentor): """An instrumentor for OpenAI's client library with comprehensive coverage.""" def __init__( diff --git a/agentops/instrumentation/utilities/concurrent_futures/instrumentation.py b/agentops/instrumentation/utilities/concurrent_futures/instrumentation.py index 36771069e..594cd0420 100644 --- a/agentops/instrumentation/utilities/concurrent_futures/instrumentation.py +++ b/agentops/instrumentation/utilities/concurrent_futures/instrumentation.py @@ -11,7 +11,7 @@ from concurrent.futures import ThreadPoolExecutor, Future -from agentops.instrumentation.common import BaseAgentOpsInstrumentor, InstrumentorConfig +from agentops.instrumentation.common import CommonInstrumentor, InstrumentorConfig from agentops.instrumentation.common.wrappers import WrapConfig from agentops.logging import logger @@ -95,7 +95,7 @@ def wrapped_submit(self: ThreadPoolExecutor, func: Callable[..., R], *args: Any, return wrapped_submit -class ConcurrentFuturesInstrumentor(BaseAgentOpsInstrumentor): +class ConcurrentFuturesInstrumentor(CommonInstrumentor): """ Instrumentor for concurrent.futures module. 
diff --git a/tests/unit/instrumentation/openai_core/test_instrumentor.py b/tests/unit/instrumentation/openai_core/test_instrumentor.py index 63e9b1ea4..52fd6f81d 100644 --- a/tests/unit/instrumentation/openai_core/test_instrumentor.py +++ b/tests/unit/instrumentation/openai_core/test_instrumentor.py @@ -15,7 +15,7 @@ from unittest.mock import MagicMock, patch -from agentops.instrumentation.providers.openai.instrumentor import OpenAIInstrumentor +from agentops.instrumentation.providers.openai.instrumentor import OpenaiInstrumentor from agentops.instrumentation.common.wrappers import WrapConfig @@ -32,7 +32,7 @@ def load_fixture(fixture_name): OPENAI_RESPONSE_TOOL_CALLS = load_fixture("openai_response_tool_calls.json") # Response API with tool calls -class TestOpenAIInstrumentor: +class TestOpenaiInstrumentor: """Tests for OpenAI API instrumentation, focusing on Response API support""" @pytest.fixture @@ -49,7 +49,7 @@ def instrumentor(self): # Create a real instrumentation setup for testing mock_tracer_provider = MagicMock() - instrumentor = OpenAIInstrumentor() + instrumentor = OpenaiInstrumentor() # To avoid timing issues with the fixture, we need to ensure patch # objects are created before being used in the test @@ -82,8 +82,8 @@ def instrumentor(self): def test_instrumentor_initialization(self): """Test instrumentor is initialized with correct configuration""" - instrumentor = OpenAIInstrumentor() - assert instrumentor.__class__.__name__ == "OpenAIInstrumentor" + instrumentor = OpenaiInstrumentor() + assert instrumentor.__class__.__name__ == "OpenaiInstrumentor" # Verify it inherits from BaseInstrumentor from opentelemetry.instrumentation.instrumentor import BaseInstrumentor @@ -165,7 +165,7 @@ def test_calls_parent_uninstrument(self, instrumentor): def test_wrapper_error_handling(self): """Test that the instrumentor handles errors when wrapping methods""" # Create instrumentor - instrumentor = OpenAIInstrumentor() + instrumentor = OpenaiInstrumentor() # Mock wrap to raise an exception with patch("agentops.instrumentation.common.wrappers.wrap") as mock_wrap: @@ -181,7 +181,7 @@ def test_wrapper_error_handling(self): def test_unwrapper_error_handling(self): """Test that the instrumentor handles errors when unwrapping methods""" # Create instrumentor - instrumentor = OpenAIInstrumentor() + instrumentor = OpenaiInstrumentor() # Mock unwrap to raise an exception with patch("agentops.instrumentation.common.wrappers.unwrap") as mock_unwrap: @@ -197,7 +197,7 @@ def test_unwrapper_error_handling(self): def test_instrumentation_with_tracer(self): """Test that the instrumentor gets a tracer with the correct name and version""" # Create instrumentor - instrumentor = OpenAIInstrumentor() + instrumentor = OpenaiInstrumentor() # Since get_tracer is now imported at module level in openai/instrumentor.py, # we can test this through spying on the _instrument method instead
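A minimal, self-contained sketch of the graceful-unwrap pattern that PATCH 6/8 above introduces in the smolagents instrumentor: each patched method is unwrapped in its own try/except so that one failure is logged at debug level while the remaining methods are still restored. This is an illustration only, not part of any commit; it assumes an `unwrap` helper that accepts the same `("module.path", "Class.method")` arguments used in the diff (such as the one the instrumentor imports), and the function name `_uninstrument_smolagents` is introduced here purely for the example.

```python
from opentelemetry.instrumentation.utils import unwrap  # assumed unwrap helper, as used in the diff
from agentops.logging import logger

# (module path, "Class.method") pairs mirroring the methods wrapped by the
# smolagents instrumentor in the diff above.
_WRAPPED_METHODS = [
    ("smolagents.agents", "ToolCallingAgent.run"),
    ("smolagents.agents", "ToolCallingAgent.execute_tool_call"),
    ("smolagents.models", "LiteLLMModel.generate"),
    ("smolagents.models", "LiteLLMModel.generate_stream"),
]


def _uninstrument_smolagents() -> None:
    """Illustrative only: unwrap each method independently so a single
    failure does not abort the rest of the teardown."""
    for module_path, method in _WRAPPED_METHODS:
        try:
            unwrap(module_path, method)
        except Exception as e:
            logger.debug(f"Failed to unwrap {method}: {e}")
    logger.info("SmoLAgents instrumentation disabled")
```

The design point is that teardown mirrors setup one-to-one: every wrapped method gets its own unwrap attempt, and failures are demoted to debug logs because a missing wrapper (for example, when the optional `smolagents.models` classes were never imported) is expected rather than exceptional.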