diff --git a/agentops/instrumentation/__init__.py b/agentops/instrumentation/__init__.py index e47b6e7fb..336d6e2bc 100644 --- a/agentops/instrumentation/__init__.py +++ b/agentops/instrumentation/__init__.py @@ -82,6 +82,11 @@ class InstrumentorConfig(TypedDict): "class_name": "CrewaiInstrumentor", "min_version": "0.56.0", }, + "autogen_agentchat": { + "module_name": "agentops.instrumentation.agentic.autogen", + "class_name": "AutoGenInstrumentor", + "min_version": "0.6.4", + }, "autogen": { "module_name": "agentops.instrumentation.agentic.ag2", "class_name": "AG2Instrumentor", @@ -288,6 +293,15 @@ def _perform_instrumentation(package_name: str): if not _should_instrument_package(package_name): return + # If we are about to instrument the FIRST agentic library, set the flag **before** importing + # its instrumentor. This prevents re-entrancy where the agentic library imports a provider + # (e.g. ``openai``) while its own instrumentor module is still only *partially* initialised, + # which leads to the ``partially initialised module ... has no attribute`` circular-import + # error that users are seeing. We only set this once – subsequent calls will short-circuit + # in ``_should_instrument_package``. + if package_name in AGENTIC_LIBRARIES and not _has_agentic_library: + _has_agentic_library = True + # Get the appropriate configuration for the package # Ensure package_name is a key in either PROVIDERS or AGENTIC_LIBRARIES if package_name not in PROVIDERS and package_name not in AGENTIC_LIBRARIES: diff --git a/agentops/instrumentation/agentic/autogen/__init__.py b/agentops/instrumentation/agentic/autogen/__init__.py new file mode 100644 index 000000000..49a264d6b --- /dev/null +++ b/agentops/instrumentation/agentic/autogen/__init__.py @@ -0,0 +1,65 @@ +"""AutoGen Instrumentation Module + +This module provides instrumentation for the original AutoGen framework (autogen_agentchat). +It creates create_agent spans that match the expected trace structure for AutoGen agents. 
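+
+Activation is normally automatic; a minimal sketch (assuming the standard
+``agentops.init()`` entry point) looks like:
+
+    import agentops
+    agentops.init()  # registers AutoGenInstrumentor for autogen_agentchat >= 0.6.4
+
+    # Importing autogen_agentchat afterwards triggers the wrapping below.
+    from autogen_agentchat.agents import AssistantAgent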
+ +""" + +from agentops.instrumentation.common import LibraryInfo + +# Library information +_library_info = LibraryInfo(name="autogen_agentchat") +LIBRARY_NAME = _library_info.name +LIBRARY_VERSION = _library_info.version + +# Import after defining constants to avoid circular imports +from .instrumentor import AutoGenInstrumentor # noqa: E402 + +# Import modular components for advanced users +from .agents import ( # noqa: E402 + BaseChatAgentInstrumentor, + AssistantAgentInstrumentor, + UserProxyAgentInstrumentor, + CodeExecutorAgentInstrumentor, + SocietyOfMindAgentInstrumentor, +) +from .teams import ( # noqa: E402 + RoundRobinGroupChatInstrumentor, + SelectorGroupChatInstrumentor, + SwarmInstrumentor, +) +from .utils import ( # noqa: E402 + AutoGenSpanManager, + extract_agent_attributes, + safe_str, + safe_extract_content, + create_agent_span, + instrument_async_generator, + instrument_coroutine, +) + +__all__ = [ + # Main instrumentors + "AutoGenInstrumentor", + # Library info + "LIBRARY_NAME", + "LIBRARY_VERSION", + # Agent instrumentors + "BaseChatAgentInstrumentor", + "AssistantAgentInstrumentor", + "UserProxyAgentInstrumentor", + "CodeExecutorAgentInstrumentor", + "SocietyOfMindAgentInstrumentor", + # Team instrumentors + "RoundRobinGroupChatInstrumentor", + "SelectorGroupChatInstrumentor", + "SwarmInstrumentor", + # Utilities + "AutoGenSpanManager", + "extract_agent_attributes", + "safe_str", + "safe_extract_content", + "create_agent_span", + "instrument_async_generator", + "instrument_coroutine", +] diff --git a/agentops/instrumentation/agentic/autogen/agents/__init__.py b/agentops/instrumentation/agentic/autogen/agents/__init__.py new file mode 100644 index 000000000..e36c50216 --- /dev/null +++ b/agentops/instrumentation/agentic/autogen/agents/__init__.py @@ -0,0 +1,19 @@ +"""AutoGen agent instrumentation.""" + +from .common import ( + CommonAgentWrappers, + BaseChatAgentInstrumentor, + AssistantAgentInstrumentor, + UserProxyAgentInstrumentor, + CodeExecutorAgentInstrumentor, + SocietyOfMindAgentInstrumentor, +) + +__all__ = [ + "CommonAgentWrappers", + "BaseChatAgentInstrumentor", + "AssistantAgentInstrumentor", + "UserProxyAgentInstrumentor", + "CodeExecutorAgentInstrumentor", + "SocietyOfMindAgentInstrumentor", +] diff --git a/agentops/instrumentation/agentic/autogen/agents/common.py b/agentops/instrumentation/agentic/autogen/agents/common.py new file mode 100644 index 000000000..e0c8dc0d7 --- /dev/null +++ b/agentops/instrumentation/agentic/autogen/agents/common.py @@ -0,0 +1,840 @@ +"""Common wrapper methods for AutoGen agent instrumentors. + +This module provides base wrapper methods that are shared across all AutoGen agent types +to avoid code duplication while allowing specific agents to add their unique methods. 
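+
+Every factory below returns a ``wrapt``-compatible wrapper. Its shape, sketched
+here for orientation only (names are illustrative), is:
+
+    def wrapper(wrapped, instance, args, kwargs):
+        with tracer.start_as_current_span(span_name) as span:
+            span.set_attribute("gen_ai.agent.name", getattr(instance, "name", "unnamed_agent"))
+            return wrapped(*args, **kwargs)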
+""" + +import logging +from opentelemetry.trace import SpanKind, Status, StatusCode +import inspect +from agentops.instrumentation.common import SpanAttributeManager, create_span +from agentops.semconv.agent import AgentAttributes +from agentops.semconv.span_attributes import SpanAttributes +from agentops.semconv.span_kinds import AgentOpsSpanKindValues +from ..utils.common import ( + AutoGenSpanManager, + extract_agent_attributes, + safe_str, + instrument_coroutine, + instrument_async_generator, +) + +logger = logging.getLogger(__name__) + + +class CommonAgentWrappers: + """Base class with common wrapper methods for all AutoGen agents.""" + + def __init__(self, tracer, attribute_manager: SpanAttributeManager): + self.tracer = tracer + self.attribute_manager = attribute_manager + self.span_manager = AutoGenSpanManager(tracer, attribute_manager) + + def _create_agent_wrapper(self): + """Common wrapper for capturing agent creation.""" + + def wrapper(wrapped, instance, args, kwargs): + try: + # Extract agent attributes + attributes = extract_agent_attributes(instance, args, kwargs) + agent_name = attributes["name"] + agent_type = attributes["type"] + + # Create the create_agent span + span_name = f"create_agent {agent_name}.workflow" + + if not self.tracer: + return wrapped(*args, **kwargs) + + # Create the span using a context-manager so lifecycle is automatic + with create_span( + self.tracer, + span_name, + kind=SpanKind.CLIENT, + attribute_manager=self.attribute_manager, + ) as span: + try: + # Set base attributes + self.span_manager.set_base_attributes(span, agent_name, "create_agent") + span.set_attribute("gen_ai.agent.type", agent_type) + + # Set description if available + if "description" in attributes: + span.set_attribute("gen_ai.agent.description", attributes["description"]) + + # Call the original __init__ method + result = wrapped(*args, **kwargs) + + # Store metadata on the instance for future use + if hasattr(instance, "__dict__"): + instance._agentops_metadata = { + "name": agent_name, + "type": agent_type, + "system": "autogen", + } + + return result + + except Exception as e: + # Span status/error handling will be managed by create_span, but we log for visibility + logger.debug(f"[AutoGen DEBUG] Error during create_agent instrumentation: {e}") + raise + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Error in _create_agent_wrapper: {e}") + return wrapped(*args, **kwargs) + + return wrapper + + def _run_wrapper(self): + """Common wrapper for capturing agent run method calls.""" + + def wrapper(wrapped, instance, args, kwargs): + try: + agent_name = getattr(instance, "name", "unnamed_agent") + span_name = f"agent.run.{agent_name}.agent" + + # Call the original method first to get the result + result = wrapped(*args, **kwargs) + + # Check if result is async and wrap it + + if inspect.iscoroutine(result): + # Wrap coroutine to keep span active during execution + async def instrumented_coroutine(): + with self.tracer.start_as_current_span(span_name) as span: + # Set span attributes + span.set_attribute(SpanAttributes.LLM_SYSTEM, "autogen") + span.set_attribute("gen_ai.agent.name", str(agent_name)) + span.set_attribute("gen_ai.operation.name", "agent.run") + span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.AGENT.value) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "agent") + + # Get task information + task = kwargs.get("task") or (args[0] if args else None) + if task and isinstance(task, str): + span.set_attribute("agent.task", 
safe_str(task)) + + # Instrument the coroutine + return await instrument_coroutine(result, span, "run") + + return instrumented_coroutine() + else: + # Synchronous result + with self.tracer.start_as_current_span(span_name) as span: + span.set_attribute(SpanAttributes.LLM_SYSTEM, "autogen") + span.set_attribute("gen_ai.agent.name", str(agent_name)) + span.set_attribute("gen_ai.operation.name", "agent.run") + span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.AGENT.value) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "agent") + + # Get task information + task = kwargs.get("task") or (args[0] if args else None) + if task and isinstance(task, str): + span.set_attribute("agent.task", safe_str(task)) + + return result + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Error in agent run wrapper: {e}") + return wrapped(*args, **kwargs) + + return wrapper + + def _run_stream_wrapper(self): + """Common wrapper for capturing agent run_stream method calls.""" + + def wrapper(wrapped, instance, args, kwargs): + try: + agent_name = getattr(instance, "name", "unnamed_agent") + span_name = f"agent.run_stream.{agent_name}.agent" + + # Call the original method first to get the result + result = wrapped(*args, **kwargs) + + # Check if result is async and wrap it + if inspect.isasyncgen(result): + # Wrap async generator to keep span active during execution + async def instrumented_async_generator(): + with self.tracer.start_as_current_span(span_name) as span: + # Set span attributes + span.set_attribute(SpanAttributes.LLM_SYSTEM, "autogen") + span.set_attribute("gen_ai.agent.name", str(agent_name)) + span.set_attribute("gen_ai.operation.name", "agent.run_stream") + span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.AGENT.value) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "agent") + + # Get task information + task = kwargs.get("task") or (args[0] if args else None) + if task and isinstance(task, str): + span.set_attribute("agent.task", safe_str(task)) + + # Instrument the async generator + async for item in instrument_async_generator(result, span, "run_stream"): + yield item + + return instrumented_async_generator() + + elif inspect.iscoroutine(result): + # Wrap coroutine to keep span active during execution + async def instrumented_coroutine(): + with self.tracer.start_as_current_span(span_name) as span: + # Set span attributes + span.set_attribute(SpanAttributes.LLM_SYSTEM, "autogen") + span.set_attribute("gen_ai.agent.name", str(agent_name)) + span.set_attribute("gen_ai.operation.name", "agent.run_stream") + span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.AGENT.value) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "agent") + + # Get task information + task = kwargs.get("task") or (args[0] if args else None) + if task and isinstance(task, str): + span.set_attribute("agent.task", safe_str(task)) + + # Instrument the coroutine + return await instrument_coroutine(result, span, "run_stream") + + return instrumented_coroutine() + else: + # Synchronous result + with self.tracer.start_as_current_span(span_name) as span: + span.set_attribute(SpanAttributes.LLM_SYSTEM, "autogen") + span.set_attribute("gen_ai.agent.name", str(agent_name)) + span.set_attribute("gen_ai.operation.name", "agent.run_stream") + span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.AGENT.value) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "agent") + + # Get task information + task 
= kwargs.get("task") or (args[0] if args else None) + if task and isinstance(task, str): + span.set_attribute("agent.task", safe_str(task)) + + return result + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Error in agent run_stream wrapper: {e}") + return wrapped(*args, **kwargs) + + return wrapper + + def _on_messages_wrapper(self): + """Common wrapper for capturing agent on_messages method calls.""" + + def wrapper(wrapped, instance, args, kwargs): + try: + agent_name = getattr(instance, "name", "unnamed_agent") + span_name = f"agent.on_messages.{agent_name}.agent" + + # Call the original method first to get the result + result = wrapped(*args, **kwargs) + + # Check if result is async and wrap it + if inspect.iscoroutine(result): + # Wrap coroutine to keep span active during execution + async def instrumented_coroutine(): + with self.tracer.start_as_current_span(span_name) as span: + # Set span attributes + span.set_attribute(SpanAttributes.LLM_SYSTEM, "autogen") + span.set_attribute("gen_ai.agent.name", str(agent_name)) + span.set_attribute("gen_ai.operation.name", "agent.on_messages") + span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.AGENT.value) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "agent") + span.set_attribute(AgentAttributes.AGENT_NAME, agent_name) + + # Get messages information + messages = kwargs.get("messages") or (args[0] if args else None) + if messages and hasattr(messages, "__len__"): + span.set_attribute("agent.input_message_count", len(messages)) + + # Instrument the coroutine + return await instrument_coroutine(result, span, "on_messages") + + return instrumented_coroutine() + else: + # Synchronous result + with self.tracer.start_as_current_span(span_name) as span: + span.set_attribute(SpanAttributes.LLM_SYSTEM, "autogen") + span.set_attribute("gen_ai.agent.name", str(agent_name)) + span.set_attribute("gen_ai.operation.name", "agent.on_messages") + span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.AGENT.value) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "agent") + span.set_attribute(AgentAttributes.AGENT_NAME, agent_name) + + # Get messages information + messages = kwargs.get("messages") or (args[0] if args else None) + if messages and hasattr(messages, "__len__"): + span.set_attribute("agent.input_message_count", len(messages)) + + return result + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Error in agent on_messages wrapper: {e}") + return wrapped(*args, **kwargs) + + return wrapper + + def _on_messages_stream_wrapper(self): + """Common wrapper for capturing agent on_messages_stream method calls.""" + + def wrapper(wrapped, instance, args, kwargs): + try: + agent_name = getattr(instance, "name", "unnamed_agent") + span_name = f"agent.on_messages_stream.{agent_name}.agent" + + # Call the original method first to get the result + result = wrapped(*args, **kwargs) + + # Check if result is async and wrap it + if inspect.isasyncgen(result): + # Wrap async generator to keep span active during execution + async def instrumented_async_generator(): + with self.tracer.start_as_current_span(span_name) as span: + # Set span attributes + span.set_attribute(SpanAttributes.LLM_SYSTEM, "autogen") + span.set_attribute("gen_ai.agent.name", str(agent_name)) + span.set_attribute("gen_ai.operation.name", "agent.on_messages_stream") + span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.AGENT.value) + 
span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "agent") + span.set_attribute(AgentAttributes.AGENT_NAME, agent_name) + + # Get messages information + messages = kwargs.get("messages") or (args[0] if args else None) + if messages and hasattr(messages, "__len__"): + span.set_attribute("agent.input_message_count", len(messages)) + + # Track streaming progress + item_count = 0 + event_types = set() + + # Instrument the async generator + async for item in instrument_async_generator(result, span, "on_messages_stream"): + item_count += 1 + item_type = type(item).__name__ + event_types.add(item_type) + + # Update streaming metrics + span.set_attribute("agent.stream.item_count", item_count) + span.set_attribute("agent.stream.event_types", ",".join(sorted(event_types))) + + yield item + + return instrumented_async_generator() + + elif inspect.iscoroutine(result): + # Wrap coroutine to keep span active during execution + async def instrumented_coroutine(): + with self.tracer.start_as_current_span(span_name) as span: + # Set span attributes + span.set_attribute(SpanAttributes.LLM_SYSTEM, "autogen") + span.set_attribute("gen_ai.agent.name", str(agent_name)) + span.set_attribute("gen_ai.operation.name", "agent.on_messages_stream") + span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.AGENT.value) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "agent") + span.set_attribute(AgentAttributes.AGENT_NAME, agent_name) + + # Get messages information + messages = kwargs.get("messages") or (args[0] if args else None) + if messages and hasattr(messages, "__len__"): + span.set_attribute("agent.input_message_count", len(messages)) + + # Instrument the coroutine + return await instrument_coroutine(result, span, "on_messages_stream") + + return instrumented_coroutine() + else: + # Synchronous result + with self.tracer.start_as_current_span(span_name) as span: + span.set_attribute(SpanAttributes.LLM_SYSTEM, "autogen") + span.set_attribute("gen_ai.agent.name", str(agent_name)) + span.set_attribute("gen_ai.operation.name", "agent.on_messages_stream") + span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.AGENT.value) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "agent") + span.set_attribute(AgentAttributes.AGENT_NAME, agent_name) + + # Get messages information + messages = kwargs.get("messages") or (args[0] if args else None) + if messages and hasattr(messages, "__len__"): + span.set_attribute("agent.input_message_count", len(messages)) + + return result + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Error in agent on_messages_stream wrapper: {e}") + return wrapped(*args, **kwargs) + + return wrapper + + def _call_llm_wrapper(self): + """Generic wrapper for capturing LLM interactions.""" + + def wrapper(wrapped, instance, args, kwargs): + try: + agent_name = kwargs.get("agent_name") + + if not agent_name or isinstance(agent_name, property): + possible_name = getattr(instance, "name", None) + agent_name = possible_name if isinstance(possible_name, str) else "unnamed_agent" + + span_name = f"_call_llm {agent_name}.llm" + + # Call the original method first to get the result + result = wrapped(*args, **kwargs) + + # Check if result is async and wrap it + if inspect.isasyncgen(result): + # Wrap async generator to keep span active during execution + async def instrumented_async_generator(): + with self.tracer.start_as_current_span(span_name) as span: + # Set common attributes for all LLM calls + 
span.set_attribute(SpanAttributes.LLM_SYSTEM, "autogen") + span.set_attribute("gen_ai.agent.name", str(agent_name)) + span.set_attribute("gen_ai.operation.name", "_call_llm") + span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.LLM.value) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "llm") + + # Extract *all* prompt messages + messages_list = None + if "messages" in kwargs and kwargs["messages"]: + messages_list = kwargs["messages"] + else: + # Try positional args (rare for _call_llm) + if args: + first_arg = args[0] + if hasattr(first_arg, "messages"): + messages_list = first_arg.messages + elif hasattr(first_arg, "get"): + try: + messages_list = first_arg.get("messages") + except Exception: + messages_list = None + # Try AutoGen model_context object + if messages_list is None and "model_context" in kwargs: + mc = kwargs["model_context"] + if mc is not None: + if hasattr(mc, "_messages"): + messages_list = getattr(mc, "_messages", None) + elif hasattr(mc, "messages"): + messages_list = mc.messages + + if messages_list: + span.set_attribute("gen_ai.request.messages.count", len(messages_list)) + + # Start index after system messages already recorded (if any) + prompt_index = len(kwargs.get("system_messages", [])) + + for msg in messages_list: + content = None + role = "user" + + if isinstance(msg, str): + content = msg + role = "user" + else: + content = getattr(msg, "content", None) + role = str(getattr(msg, "role", "user")) + + if content is None: + continue + + span.set_attribute(f"gen_ai.prompt.{prompt_index}.content", safe_str(content)) + span.set_attribute(f"gen_ai.prompt.{prompt_index}.role", role) + prompt_index += 1 + + # Extract model information from kwargs + model_client = kwargs.get("model_client") + if model_client: + # Try different attributes to get model name + model_name = None + if hasattr(model_client, "model"): + model_name = getattr(model_client, "model", None) + elif hasattr(model_client, "_model"): + model_name = getattr(model_client, "_model", None) + elif hasattr(model_client, "model_name"): + model_name = getattr(model_client, "model_name", None) + elif hasattr(model_client, "_resolved_model"): + model_name = getattr(model_client, "_resolved_model", None) + + if model_name: + span.set_attribute(SpanAttributes.LLM_REQUEST_MODEL, str(model_name)) + + # Extract system messages list if present + system_messages = kwargs.get("system_messages", []) + if system_messages: + span.set_attribute("gen_ai.request.system_message_count", len(system_messages)) + # Extract first system message content + if hasattr(system_messages[0], "content"): + span.set_attribute( + "gen_ai.request.system_message", safe_str(system_messages[0].content) + ) + + # Already counted above, but also record explicit system prompts for completeness + for idx, sm in enumerate(system_messages): + if hasattr(sm, "content") and sm.content: + span.set_attribute(f"gen_ai.prompt.{idx}.content", safe_str(sm.content)) + span.set_attribute(f"gen_ai.prompt.{idx}.role", "system") + + # Track agent name from kwargs + if "agent_name" in kwargs: + span.set_attribute("gen_ai.agent.name", kwargs["agent_name"]) + + # Track completion data + accumulated_content = "" + total_tokens = 0 + prompt_tokens = 0 + completion_tokens = 0 + finish_reason = None + chunk_count = 0 + + try: + # Process the async generator + async for chunk in result: + chunk_count += 1 + + # AutoGen uses different event types + if hasattr(chunk, "__class__"): + # Handle different AutoGen event types + if 
hasattr(chunk, "content"): + # This might be a completion event + content = getattr(chunk, "content", None) + if content: + accumulated_content += str(content) + + # Try to extract usage from the chunk + if hasattr(chunk, "usage"): + usage = chunk.usage + + if usage: + # Prefer individual counts if available so we can always compute totals + prompt_tokens = getattr(usage, "prompt_tokens", prompt_tokens) + completion_tokens = getattr( + usage, "completion_tokens", completion_tokens + ) + total_tokens = getattr( + usage, "total_tokens", prompt_tokens + completion_tokens + ) + + # Set both legacy (input/output) and standard (prompt/completion) keys for compatibility + span.set_attribute( + SpanAttributes.LLM_USAGE_PROMPT_TOKENS, prompt_tokens + ) + span.set_attribute( + SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens + ) + span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, total_tokens) + # Legacy naming still used by some dashboards + span.set_attribute("gen_ai.usage.input_tokens", prompt_tokens) + span.set_attribute("gen_ai.usage.output_tokens", completion_tokens) + + # Check for finish reason within usage if present (OpenAI style) + if hasattr(usage, "finish_reason") and usage.finish_reason: + finish_reason = usage.finish_reason + + # Also handle OpenAI-style chunks + if hasattr(chunk, "choices") and chunk.choices: + choice = chunk.choices[0] + if hasattr(choice, "delta") and hasattr(choice.delta, "content"): + if choice.delta.content: + accumulated_content += choice.delta.content + + if hasattr(choice, "finish_reason") and choice.finish_reason: + finish_reason = choice.finish_reason + + # Done processing usage for this chunk + yield chunk + + # Set final attributes + if accumulated_content: + span.set_attribute("gen_ai.completion.0.content", safe_str(accumulated_content)) + span.set_attribute("gen_ai.completion.0.content_length", len(accumulated_content)) + + if finish_reason: + span.set_attribute("gen_ai.completion.0.finish_reason", finish_reason) + + if total_tokens > 0: + # Set both legacy (input/output) and standard (prompt/completion) keys for compatibility + span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, prompt_tokens) + span.set_attribute(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens) + span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, total_tokens) + span.set_attribute("gen_ai.usage.input_tokens", prompt_tokens) + span.set_attribute("gen_ai.usage.output_tokens", completion_tokens) + + span.set_attribute("gen_ai.response.chunk_count", chunk_count) + span.set_status(Status(StatusCode.OK)) + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Error processing LLM stream: {e}") + span.set_status(Status(StatusCode.ERROR, str(e))) + # Re-raise to maintain original behavior + raise + + return instrumented_async_generator() + + elif inspect.iscoroutine(result): + # Wrap coroutine to keep span active during execution + async def instrumented_coroutine(): + with self.tracer.start_as_current_span(span_name) as span: + # Set common attributes for all LLM calls + span.set_attribute(SpanAttributes.LLM_SYSTEM, "autogen") + span.set_attribute("gen_ai.agent.name", str(agent_name)) + span.set_attribute("gen_ai.operation.name", "_call_llm") + span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.LLM.value) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "llm") + + try: + # Await the result and process it + llm_result = await result + + # Extract and set completion attributes from result + if 
hasattr(llm_result, "choices") and llm_result.choices: + choice = llm_result.choices[0] + if hasattr(choice, "message") and hasattr(choice.message, "content"): + content = choice.message.content + if content: + span.set_attribute("gen_ai.completion.0.content", safe_str(content)) + + if hasattr(choice, "finish_reason"): + span.set_attribute("gen_ai.completion.0.finish_reason", choice.finish_reason) + + # Extract usage information + if hasattr(llm_result, "usage") and llm_result.usage: + if hasattr(llm_result.usage, "total_tokens"): + span.set_attribute( + SpanAttributes.LLM_USAGE_TOTAL_TOKENS, llm_result.usage.total_tokens + ) + if hasattr(llm_result.usage, "prompt_tokens"): + span.set_attribute( + SpanAttributes.LLM_USAGE_PROMPT_TOKENS, llm_result.usage.prompt_tokens + ) + if hasattr(llm_result.usage, "completion_tokens"): + span.set_attribute( + SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, + llm_result.usage.completion_tokens, + ) + + span.set_status(Status(StatusCode.OK)) + return llm_result + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Error processing LLM result: {e}") + span.set_status(Status(StatusCode.ERROR, str(e))) + raise + + return instrumented_coroutine() + else: + # Synchronous result + with self.tracer.start_as_current_span(span_name) as span: + span.set_attribute(SpanAttributes.LLM_SYSTEM, "autogen") + span.set_attribute("gen_ai.agent.name", str(agent_name)) + span.set_attribute("gen_ai.operation.name", "_call_llm") + span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.LLM.value) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "llm") + + # For synchronous calls, we already have the result + # Extract completion information + if hasattr(result, "choices") and result.choices: + choice = result.choices[0] + if hasattr(choice, "message") and hasattr(choice.message, "content"): + content = choice.message.content + if content: + span.set_attribute("gen_ai.completion.0.content", safe_str(content)) + + if hasattr(choice, "finish_reason"): + span.set_attribute("gen_ai.completion.0.finish_reason", choice.finish_reason) + + # Extract usage information + if hasattr(result, "usage") and result.usage: + if hasattr(result.usage, "total_tokens"): + span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, result.usage.total_tokens) + if hasattr(result.usage, "prompt_tokens"): + span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, result.usage.prompt_tokens) + if hasattr(result.usage, "completion_tokens"): + span.set_attribute( + SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, result.usage.completion_tokens + ) + + return result + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Error in _call_llm wrapper: {e}") + return wrapped(*args, **kwargs) + + return wrapper + + def _execute_tool_call_wrapper(self): + """Generic wrapper for capturing tool executions.""" + + def wrapper(wrapped, instance, args, kwargs): + try: + agent_name = getattr(instance, "name", "unnamed_agent") + + # Extract tool information + tool_call = args[0] if args else kwargs.get("tool_call") + tool_name = "unknown_tool" + if tool_call and hasattr(tool_call, "function") and hasattr(tool_call.function, "name"): + tool_name = tool_call.function.name + + span_name = f"tool.{tool_name}.tool" + + # Call the original method first to get the result + result = wrapped(*args, **kwargs) + + # Check if result is async and wrap it + if inspect.iscoroutine(result): + # Wrap coroutine to keep span active during execution + async def instrumented_coroutine(): + with 
self.tracer.start_as_current_span(span_name) as span: + # Set tool attributes + span.set_attribute(SpanAttributes.LLM_SYSTEM, "autogen") + span.set_attribute("gen_ai.agent.name", str(agent_name)) + span.set_attribute("gen_ai.operation.name", "_execute_tool_call") + span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.TOOL.value) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "tool") + + # Extract tool arguments if available + if ( + tool_call + and hasattr(tool_call, "function") + and hasattr(tool_call.function, "arguments") + ): + span.set_attribute("tool.arguments", safe_str(tool_call.function.arguments)) + + try: + # Await the result and capture tool output + tool_result = await result + + # Attempt to derive a better tool name from the result if unknown + if tool_name == "unknown_tool": + derived_name = None + # Common Autogen pattern: (FunctionCall, FunctionExecutionResult) + if isinstance(tool_result, (tuple, list)): + for item in tool_result: + derived_name = getattr(item, "name", None) + if derived_name: + break + else: + derived_name = getattr(tool_result, "name", None) + if derived_name: + tool_name_local = str(derived_name) + span.update_name(f"tool.{tool_name_local}.{agent_name}.tool") + span.set_attribute("tool.name", tool_name_local) + + # Set tool result if available + if tool_result: + span.set_attribute("tool.result", safe_str(tool_result)) + + span.set_status(Status(StatusCode.OK)) + return tool_result + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Error in tool execution: {e}") + span.set_status(Status(StatusCode.ERROR, str(e))) + raise + + return instrumented_coroutine() + else: + # Synchronous result + with self.tracer.start_as_current_span(span_name) as span: + span.set_attribute(SpanAttributes.LLM_SYSTEM, "autogen") + span.set_attribute("gen_ai.agent.name", str(agent_name)) + span.set_attribute("gen_ai.operation.name", "_execute_tool_call") + span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.TOOL.value) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "tool") + span.set_attribute("tool.name", tool_name) + + # Extract tool arguments if available + if tool_call and hasattr(tool_call, "function") and hasattr(tool_call.function, "arguments"): + span.set_attribute("tool.arguments", safe_str(tool_call.function.arguments)) + + # Set tool result if available + if result: + span.set_attribute("tool.result", safe_str(result)) + + return result + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Error in tool execution wrapper: {e}") + return wrapped(*args, **kwargs) + + return wrapper + + +# Agent Instrumentor Classes + + +class BaseChatAgentInstrumentor(CommonAgentWrappers): + """Instrumentor for AutoGen BaseChatAgent.""" + + def get_wrappers(self): + """Get list of methods to wrap for BaseChatAgent.""" + # Return tuples of (class_name, method_name, wrapper_factory) to avoid circular imports + return [ + ("BaseChatAgent", "run", lambda: self._run_wrapper()), + ("BaseChatAgent", "run_stream", lambda: self._run_stream_wrapper()), + ("BaseChatAgent", "on_messages", lambda: self._on_messages_wrapper()), + ("BaseChatAgent", "on_messages_stream", lambda: self._on_messages_stream_wrapper()), + # ("BaseChatAgent", "_call_llm", lambda: self._call_llm_wrapper()), + # ("BaseChatAgent", "_execute_tool_call", lambda: self._execute_tool_call_wrapper()), + ] + + +class AssistantAgentInstrumentor(CommonAgentWrappers): + """Instrumentor for AutoGen AssistantAgent with specialized LLM and tool 
instrumentation.""" + + def get_wrappers(self): + """Get list of methods to wrap for AssistantAgent.""" + # Add all the standard agent wrappers + wrappers = [ + ("AssistantAgent", "__init__", lambda: self._create_agent_wrapper()), + ("AssistantAgent", "run", lambda: self._run_wrapper()), + ("AssistantAgent", "run_stream", lambda: self._run_stream_wrapper()), + ("AssistantAgent", "on_messages_stream", lambda: self._on_messages_stream_wrapper()), + ("AssistantAgent", "_call_llm", lambda: self._call_llm_wrapper()), + ("AssistantAgent", "_execute_tool_call", lambda: self._execute_tool_call_wrapper()), + ] + + return wrappers + + +class UserProxyAgentInstrumentor(CommonAgentWrappers): + """Instrumentor for AutoGen UserProxyAgent.""" + + def get_wrappers(self): + """Get list of methods to wrap for UserProxyAgent.""" + return [ + ("UserProxyAgent", "__init__", lambda: self._create_agent_wrapper()), + ("UserProxyAgent", "run", lambda: self._run_wrapper()), + ("UserProxyAgent", "run_stream", lambda: self._run_stream_wrapper()), + ("UserProxyAgent", "on_messages", lambda: self._on_messages_wrapper()), + ("UserProxyAgent", "on_messages_stream", lambda: self._on_messages_stream_wrapper()), + ] + + +class CodeExecutorAgentInstrumentor(CommonAgentWrappers): + """Instrumentor for AutoGen CodeExecutorAgent.""" + + def get_wrappers(self): + """Get list of methods to wrap for CodeExecutorAgent.""" + # Standard agent wrappers plus LLM and tool-call wrappers so we capture model usage. + return [ + ("CodeExecutorAgent", "__init__", lambda: self._create_agent_wrapper()), + ("CodeExecutorAgent", "run", lambda: self._run_wrapper()), + ("CodeExecutorAgent", "run_stream", lambda: self._run_stream_wrapper()), + ("CodeExecutorAgent", "on_messages", lambda: self._on_messages_wrapper()), + ("CodeExecutorAgent", "on_messages_stream", lambda: self._on_messages_stream_wrapper()), + ("CodeExecutorAgent", "_call_llm", lambda: self._call_llm_wrapper()), + ("CodeExecutorAgent", "_reflect_on_code_block_results_flow", lambda: self._call_llm_wrapper()), + ] + + +class SocietyOfMindAgentInstrumentor(CommonAgentWrappers): + """Instrumentor for AutoGen SocietyOfMindAgent.""" + + def get_wrappers(self): + """Get list of methods to wrap for SocietyOfMindAgent.""" + return [ + ("SocietyOfMindAgent", "__init__", lambda: self._create_agent_wrapper()), + ("SocietyOfMindAgent", "run", lambda: self._run_wrapper()), + ("SocietyOfMindAgent", "run_stream", lambda: self._run_stream_wrapper()), + ("SocietyOfMindAgent", "on_messages", lambda: self._on_messages_wrapper()), + ("SocietyOfMindAgent", "on_messages_stream", lambda: self._on_messages_stream_wrapper()), + ] diff --git a/agentops/instrumentation/agentic/autogen/instrumentor.py b/agentops/instrumentation/agentic/autogen/instrumentor.py new file mode 100644 index 000000000..4b05eef53 --- /dev/null +++ b/agentops/instrumentation/agentic/autogen/instrumentor.py @@ -0,0 +1,183 @@ +"""AutoGen (Original) Instrumentation Module + +This module provides a clean, modular instrumentation for the original AutoGen framework. +It uses specialized instrumentors for different agent types and operations. 
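+
+AgentOps instantiates this class automatically; manual use follows the usual
+OpenTelemetry instrumentor convention (sketch; assumes CommonInstrumentor
+exposes the standard ``instrument``/``uninstrument`` API):
+
+    instrumentor = AutoGenInstrumentor()
+    instrumentor.instrument()    # wrap agent/team methods, silence autogen-core telemetry
+    ...
+    instrumentor.uninstrument()  # unwrap and restore autogen-core telemetry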
+""" + +import logging +from typing import Dict, Any +from opentelemetry.metrics import Meter +from wrapt import wrap_function_wrapper +from opentelemetry.instrumentation.utils import unwrap as otel_unwrap + +from agentops.instrumentation.common import ( + CommonInstrumentor, + InstrumentorConfig, + StandardMetrics, + SpanAttributeManager, +) +from agentops.instrumentation.agentic.autogen import LIBRARY_NAME, LIBRARY_VERSION +from agentops.instrumentation.agentic.autogen.noop_tracer import disable_autogen_telemetry, restore_autogen_telemetry + +# Import modular instrumentors +from .agents import ( + BaseChatAgentInstrumentor, + AssistantAgentInstrumentor, + UserProxyAgentInstrumentor, + CodeExecutorAgentInstrumentor, + SocietyOfMindAgentInstrumentor, +) +from .teams import ( + RoundRobinGroupChatInstrumentor, + SelectorGroupChatInstrumentor, + SwarmInstrumentor, +) + +logger = logging.getLogger(__name__) + + +class AutoGenInstrumentor(CommonInstrumentor): + """Refactored Instrumentor for original AutoGen framework + + This instrumentor uses modular agent-specific instrumentors for clean + separation of concerns and better maintainability. + """ + + def __init__(self): + config = InstrumentorConfig( + library_name=LIBRARY_NAME, + library_version=LIBRARY_VERSION, + wrapped_methods=[], + metrics_enabled=True, + dependencies=["autogen_agentchat >= 0.6.4"], + ) + super().__init__(config) + self._attribute_manager = None + self._agent_instrumentors = [] + self._team_instrumentors = [] + + def _create_metrics(self, meter: Meter) -> Dict[str, Any]: + """Create metrics for AutoGen instrumentation.""" + return StandardMetrics.create_standard_metrics(meter) + + def _initialize(self, **kwargs): + """Initialize attribute manager and modular instrumentors.""" + self._attribute_manager = SpanAttributeManager(service_name="agentops", deployment_environment="production") + + # Initialize agent instrumentors + self._agent_instrumentors = [ + BaseChatAgentInstrumentor(self._tracer, self._attribute_manager), + AssistantAgentInstrumentor(self._tracer, self._attribute_manager), + UserProxyAgentInstrumentor(self._tracer, self._attribute_manager), + CodeExecutorAgentInstrumentor(self._tracer, self._attribute_manager), + SocietyOfMindAgentInstrumentor(self._tracer, self._attribute_manager), + ] + + # Initialize team instrumentors + self._team_instrumentors = [ + RoundRobinGroupChatInstrumentor(self._tracer, self._attribute_manager), + SelectorGroupChatInstrumentor(self._tracer, self._attribute_manager), + SwarmInstrumentor(self._tracer, self._attribute_manager), + ] + + def _enhance_autogen_core_telemetry(self): + """Disable autogen-core's telemetry to prevent duplicate spans.""" + disable_autogen_telemetry() + + def _custom_wrap(self, **kwargs): + """Perform custom wrapping using modular instrumentors.""" + logger.debug("[AutoGen DEBUG] Starting modular wrapping for AutoGen methods...") + + # Disable autogen-core's telemetry + self._enhance_autogen_core_telemetry() + + # Collect all wrappers from agent instrumentors + all_wrappers = [] + + for instrumentor in self._agent_instrumentors: + wrappers = instrumentor.get_wrappers() + all_wrappers.extend(wrappers) + + for instrumentor in self._team_instrumentors: + wrappers = instrumentor.get_wrappers() + all_wrappers.extend(wrappers) + + # Apply all wrappers + for wrapper_data in all_wrappers: + try: + # Support both 3-tuple (class, method, factory) and 4-tuple (module, class, method, factory) + if len(wrapper_data) == 4: + module_name, class_name, method_name, 
wrapper_factory = wrapper_data + else: + class_name, method_name, wrapper_factory = wrapper_data # type: ignore + module_name = "autogen_agentchat.agents" + + wrapper = wrapper_factory() + wrap_function_wrapper(module_name, f"{class_name}.{method_name}", wrapper) + + except (AttributeError, ModuleNotFoundError) as e: + logger.debug(f"[AutoGen DEBUG] Failed to wrap {wrapper_data}: {e}") + + def _custom_unwrap(self, **kwargs): + """Remove instrumentation from AutoGen using modular approach.""" + logger.debug("[AutoGen DEBUG] Unwrapping AutoGen methods...") + + # Restore autogen-core's original telemetry + restore_autogen_telemetry() + + # Collect all method paths to unwrap + all_method_paths = [] + + def _add_paths_from_wrappers(wrappers): + for wrapper_data in wrappers: + if len(wrapper_data) == 4: + module_name, class_name, method_name, _ = wrapper_data + else: + class_name, method_name, _ = wrapper_data # type: ignore + module_name = "autogen_agentchat.agents" + all_method_paths.append((module_name, f"{class_name}.{method_name}")) + + for instrumentor in self._agent_instrumentors: + _add_paths_from_wrappers(instrumentor.get_wrappers()) + + for instrumentor in self._team_instrumentors: + _add_paths_from_wrappers(instrumentor.get_wrappers()) + + # Unwrap all methods + for module, method in all_method_paths: + try: + otel_unwrap(module, method) + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to unwrap {method}: {e}") + + def get_agent_instrumentor(self, agent_type: str): + """Get a specific agent instrumentor by type.""" + instrumentor_map = { + "BaseChatAgent": BaseChatAgentInstrumentor, + "AssistantAgent": AssistantAgentInstrumentor, + "UserProxyAgent": UserProxyAgentInstrumentor, + "CodeExecutorAgent": CodeExecutorAgentInstrumentor, + "SocietyOfMindAgent": SocietyOfMindAgentInstrumentor, + } + + instrumentor_class = instrumentor_map.get(agent_type) + if instrumentor_class: + for instrumentor in self._agent_instrumentors: + if isinstance(instrumentor, instrumentor_class): + return instrumentor + return None + + def get_team_instrumentor(self, team_type: str): + """Get a specific team instrumentor by type.""" + instrumentor_map = { + "RoundRobinGroupChat": RoundRobinGroupChatInstrumentor, + "SelectorGroupChat": SelectorGroupChatInstrumentor, + "Swarm": SwarmInstrumentor, + } + + instrumentor_class = instrumentor_map.get(team_type) + if instrumentor_class: + for instrumentor in self._team_instrumentors: + if isinstance(instrumentor, instrumentor_class): + return instrumentor + return None diff --git a/agentops/instrumentation/agentic/autogen/noop_tracer.py b/agentops/instrumentation/agentic/autogen/noop_tracer.py new file mode 100644 index 000000000..e6d2be095 --- /dev/null +++ b/agentops/instrumentation/agentic/autogen/noop_tracer.py @@ -0,0 +1,420 @@ +"""NoOp Tracer and Span classes for disabling autogen-core's telemetry. + +This module provides no-operation telemetry classes that prevent autogen-core +from creating duplicate spans while allowing AgentOps to have full control +over telemetry data. 
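+
+The two entry points are symmetric; the intended call pattern (driven by
+AutoGenInstrumentor rather than by end users) is:
+
+    disable_autogen_telemetry()   # swap autogen-core span helpers for no-ops
+    ...                           # only AgentOps spans are recorded
+    restore_autogen_telemetry()   # reinstate the saved originals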
+ +""" +import sys +import logging +from contextlib import contextmanager +from autogen_core.tools._base import BaseTool, BaseStreamTool +from autogen_core._telemetry import _genai +import autogen_core +import autogen_agentchat.agents._base_chat_agent as base_chat_module +from autogen_core._telemetry import _tracing as _autogen_tracing +import contextlib + +logger = logging.getLogger(__name__) + + +class NoOpSpan: + """A no-op span that does nothing to disable autogen-core's telemetry.""" + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + # Properly handle context exit to avoid detachment errors + return False + + def is_recording(self): + return False + + +class NoOpTracer: + """A tracer that creates no-op spans to prevent autogen-core from creating real spans.""" + + def start_as_current_span(self, *args, **kwargs): + """Return a no-op context manager.""" + return self._noop_context_manager() + + def start_span(self, *args, **kwargs): + """Return a no-op span.""" + return NoOpSpan() + + def use_span(self, *args, **kwargs): + """Return a no-op context manager.""" + return self._noop_context_manager() + + def get_tracer(self, *args, **kwargs): + """Return self to handle nested tracer calls.""" + return self + + @contextmanager + def _noop_context_manager(self): + """A proper context manager that doesn't interfere with OpenTelemetry context.""" + try: + yield NoOpSpan() + except Exception as e: + logger.debug(f"[NoOp DEBUG] Exception in _noop_context_manager: {e}") + raise + finally: + logger.debug("[NoOp DEBUG] NoOpTracer._noop_context_manager exiting") + + +def disable_autogen_telemetry(): + """Disable autogen-core's telemetry by patching BaseTool methods directly.""" + try: + # Direct approach: Patch the BaseTool.run_json method to remove trace_tool_span usage + try: + # Store original methods + if not hasattr(BaseTool, "_original_run_json"): + setattr(BaseTool, "_original_run_json", BaseTool.run_json) + if not hasattr(BaseStreamTool, "_original_run_json_stream"): + setattr(BaseStreamTool, "_original_run_json_stream", BaseStreamTool.run_json_stream) + + # Create patched version of run_json without trace_tool_span + async def patched_run_json(self, args, cancellation_token, call_id=None): + """Patched run_json that skips trace_tool_span to prevent duplicate spans.""" + # Execute the tool's run method directly (skip tracing) + return_value = await self.run(self._args_type.model_validate(args), cancellation_token) + + return return_value + + # Create patched version of run_json_stream without trace_tool_span + async def patched_run_json_stream(self, args, cancellation_token, call_id=None): + """Patched run_json_stream that skips trace_tool_span to prevent duplicate spans.""" + return_value = None + + # Execute the tool's run_stream method directly (skip tracing) + async for result in self.run_stream(self._args_type.model_validate(args), cancellation_token): + return_value = result + yield result + + assert return_value is not None, "The tool must yield a final return value at the end of the stream." 
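+                # Preserved from the original implementation: the final yielded item
+                # must match the tool's declared return type.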
+ if not isinstance(return_value, self._return_type): + raise TypeError( + f"Expected return value of type {self._return_type.__name__}, but got {type(return_value).__name__}" + ) + + # Replace the methods + BaseTool.run_json = patched_run_json + BaseStreamTool.run_json_stream = patched_run_json_stream + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to patch BaseTool methods: {e}") + + # Disable agent creation telemetry by patching trace_create_agent_span + try: + # Store original function + if not hasattr(_genai, "_original_trace_create_agent_span"): + setattr(_genai, "_original_trace_create_agent_span", _genai.trace_create_agent_span) + + # Create no-op replacement for trace_create_agent_span + @contextmanager + def noop_trace_create_agent_span(*args, **kwargs): + """No-op replacement for trace_create_agent_span to prevent duplicate create_agent spans.""" + try: + yield NoOpSpan() + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Exception in noop_trace_create_agent_span: {e}") + raise + + # Replace the function + _genai.trace_create_agent_span = noop_trace_create_agent_span + + # Also patch it in autogen_core module namespace if it's imported there + try: + if hasattr(autogen_core, "trace_create_agent_span"): + if not hasattr(autogen_core, "_original_trace_create_agent_span"): + setattr(autogen_core, "_original_trace_create_agent_span", autogen_core.trace_create_agent_span) + autogen_core.trace_create_agent_span = noop_trace_create_agent_span + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to patch autogen_core.trace_create_agent_span: {e}") + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to patch trace_create_agent_span: {e}") + + # NEW: Disable agent invocation telemetry by patching trace_invoke_agent_span + try: + # Store original function + if not hasattr(_genai, "_original_trace_invoke_agent_span"): + setattr(_genai, "_original_trace_invoke_agent_span", _genai.trace_invoke_agent_span) + + # Create no-op replacement for trace_invoke_agent_span + @contextmanager + def noop_trace_invoke_agent_span(*args, **kwargs): + """No-op replacement for trace_invoke_agent_span to prevent duplicate invoke_agent spans.""" + try: + yield NoOpSpan() + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Exception in noop_trace_invoke_agent_span: {e}") + raise + finally: + logger.debug("[AutoGen DEBUG] noop_trace_invoke_agent_span exiting") + + # Replace the function + _genai.trace_invoke_agent_span = noop_trace_invoke_agent_span + + # Also patch it in autogen_core module namespace if it's imported there + try: + if hasattr(autogen_core, "trace_invoke_agent_span"): + if not hasattr(autogen_core, "_original_trace_invoke_agent_span"): + setattr(autogen_core, "_original_trace_invoke_agent_span", autogen_core.trace_invoke_agent_span) + autogen_core.trace_invoke_agent_span = noop_trace_invoke_agent_span + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to patch autogen_core.trace_invoke_agent_span: {e}") + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to patch trace_invoke_agent_span: {e}") + + # NEW: Disable tool telemetry by patching trace_tool_span + try: + # Store original function + if not hasattr(_genai, "_original_trace_tool_span"): + setattr(_genai, "_original_trace_tool_span", _genai.trace_tool_span) + + # Create no-op replacement for trace_tool_span + @contextmanager + def noop_trace_tool_span(*args, **kwargs): + """No-op replacement for trace_tool_span to prevent tool tracing context issues.""" + 
try: + yield NoOpSpan() + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Exception in noop_trace_tool_span: {e}") + raise + finally: + logger.debug("[AutoGen DEBUG] noop_trace_tool_span exiting") + + # Replace the function + _genai.trace_tool_span = noop_trace_tool_span + + # Also patch it in autogen_core module namespace if it's imported there + try: + if hasattr(autogen_core, "trace_tool_span"): + if not hasattr(autogen_core, "_original_trace_tool_span"): + setattr(autogen_core, "_original_trace_tool_span", autogen_core.trace_tool_span) + autogen_core.trace_tool_span = noop_trace_tool_span + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to patch autogen_core.trace_tool_span: {e}") + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to patch trace_tool_span: {e}") + + try: + # Patch TraceHelper.trace_block to be a no-op context manager + if hasattr(_autogen_tracing.TraceHelper, "trace_block") and not hasattr( + _autogen_tracing.TraceHelper, "_original_trace_block" + ): + setattr( + _autogen_tracing.TraceHelper, + "_original_trace_block", + _autogen_tracing.TraceHelper.trace_block, + ) + + def _noop_trace_block(self, *args, **kwargs): # type: ignore[override] + @contextlib.contextmanager + def _cm(): + yield NoOpSpan() + + # Return the context manager so callers can use `with` as usual + return _cm() + + _autogen_tracing.TraceHelper.trace_block = _noop_trace_block # type: ignore[assignment] + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to patch TraceHelper.trace_block: {e}") + + # Also patch any existing tracer instances in autogen modules + + modules_to_check = [ + "autogen_core.tools._base", + "autogen_core._telemetry._genai", + "autogen_agentchat.agents._assistant_agent", + "autogen_agentchat.agents._base_chat_agent", + ] + + noop_tracer = NoOpTracer() + for module_name in modules_to_check: + if module_name in sys.modules: + try: + module = sys.modules[module_name] + # Look for tracer attributes and replace them + if hasattr(module, "tracer"): + setattr(module, "tracer", noop_tracer) + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to replace tracer in {module_name}: {e}") + + # CRITICAL: Patch the specific modules that import and use trace_create_agent_span + try: + # Patch the base chat agent module which directly imports and uses trace_create_agent_span + + # Store original function if not already stored and if it exists + if hasattr(base_chat_module, "trace_create_agent_span"): + if not hasattr(base_chat_module, "_original_trace_create_agent_span"): + original_func = getattr(base_chat_module, "trace_create_agent_span") + setattr(base_chat_module, "_original_trace_create_agent_span", original_func) + + # Create no-op replacement + @contextmanager + def noop_trace_create_agent_span_local(*args, **kwargs): + """No-op replacement for trace_create_agent_span in base chat agent module.""" + try: + yield NoOpSpan() + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Exception in noop_trace_create_agent_span_local: {e}") + raise + + # Replace the imported function in the module + setattr(base_chat_module, "trace_create_agent_span", noop_trace_create_agent_span_local) + else: + logger.debug("[AutoGen DEBUG] trace_create_agent_span not found in base chat agent module") + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to patch base chat agent module: {e}") + + # Also patch the invoke agent span in the same module + try: + # Store original function if not already stored and if it exists + if 
hasattr(base_chat_module, "trace_invoke_agent_span"): + if not hasattr(base_chat_module, "_original_trace_invoke_agent_span"): + original_func = getattr(base_chat_module, "trace_invoke_agent_span") + setattr(base_chat_module, "_original_trace_invoke_agent_span", original_func) + + # Create no-op replacement + @contextmanager + def noop_trace_invoke_agent_span_local(*args, **kwargs): + """No-op replacement for trace_invoke_agent_span in base chat agent module.""" + try: + yield NoOpSpan() + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Exception in noop_trace_invoke_agent_span_local: {e}") + raise + finally: + logger.debug("[AutoGen DEBUG] noop_trace_invoke_agent_span_local exiting") + + # Replace the imported function in the module + setattr(base_chat_module, "trace_invoke_agent_span", noop_trace_invoke_agent_span_local) + else: + logger.debug("[AutoGen DEBUG] trace_invoke_agent_span not found in base chat agent module") + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to patch invoke agent span in base chat agent module: {e}") + + logger.debug("[AutoGen DEBUG] Successfully disabled autogen-core telemetry") + return True + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to disable autogen-core telemetry: {e}") + return False + + +def restore_autogen_telemetry(): + """Restore autogen-core's original telemetry (for cleanup/unwrapping).""" + try: + # Restore original BaseTool methods + try: + # Restore original methods if they were saved + if hasattr(BaseTool, "_original_run_json"): + original_method = getattr(BaseTool, "_original_run_json") + BaseTool.run_json = original_method + delattr(BaseTool, "_original_run_json") + + if hasattr(BaseStreamTool, "_original_run_json_stream"): + original_method = getattr(BaseStreamTool, "_original_run_json_stream") + BaseStreamTool.run_json_stream = original_method + delattr(BaseStreamTool, "_original_run_json_stream") + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to restore BaseTool methods: {e}") + + # NEW: Restore original trace_create_agent_span function + try: + # Restore original function if it was saved + if hasattr(_genai, "_original_trace_create_agent_span"): + original_function = getattr(_genai, "_original_trace_create_agent_span") + _genai.trace_create_agent_span = original_function + delattr(_genai, "_original_trace_create_agent_span") + + # Also restore in autogen_core module namespace if it was patched + try: + if hasattr(autogen_core, "_original_trace_create_agent_span"): + original_function = getattr(autogen_core, "_original_trace_create_agent_span") + autogen_core.trace_create_agent_span = original_function + delattr(autogen_core, "_original_trace_create_agent_span") + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to restore autogen_core.trace_create_agent_span: {e}") + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to restore trace_create_agent_span: {e}") + + # NEW: Restore original trace_invoke_agent_span function + try: + # Restore original function if it was saved + if hasattr(_genai, "_original_trace_invoke_agent_span"): + original_function = getattr(_genai, "_original_trace_invoke_agent_span") + _genai.trace_invoke_agent_span = original_function + delattr(_genai, "_original_trace_invoke_agent_span") + + # Also restore in autogen_core module namespace if it was patched + try: + if hasattr(autogen_core, "_original_trace_invoke_agent_span"): + original_function = getattr(autogen_core, "_original_trace_invoke_agent_span") + 
autogen_core.trace_invoke_agent_span = original_function + delattr(autogen_core, "_original_trace_invoke_agent_span") + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to restore autogen_core.trace_invoke_agent_span: {e}") + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to restore trace_invoke_agent_span: {e}") + + # NEW: Restore original trace_tool_span function + try: + # Restore original function if it was saved + if hasattr(_genai, "_original_trace_tool_span"): + original_function = getattr(_genai, "_original_trace_tool_span") + _genai.trace_tool_span = original_function + delattr(_genai, "_original_trace_tool_span") + + # Also restore in autogen_core module namespace if it was patched + + if hasattr(autogen_core, "_original_trace_tool_span"): + original_function = getattr(autogen_core, "_original_trace_tool_span") + autogen_core.trace_tool_span = original_function + delattr(autogen_core, "_original_trace_tool_span") + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to restore trace_tool_span: {e}") + + # NEW: Restore original functions in base chat agent module + try: + # Restore trace_create_agent_span if it was patched + if hasattr(base_chat_module, "_original_trace_create_agent_span"): + original_function = getattr(base_chat_module, "_original_trace_create_agent_span") + setattr(base_chat_module, "trace_create_agent_span", original_function) + delattr(base_chat_module, "_original_trace_create_agent_span") + logger.debug("[AutoGen DEBUG] Restored trace_create_agent_span in base chat agent module") + + # Restore trace_invoke_agent_span if it was patched + if hasattr(base_chat_module, "_original_trace_invoke_agent_span"): + original_function = getattr(base_chat_module, "_original_trace_invoke_agent_span") + setattr(base_chat_module, "trace_invoke_agent_span", original_function) + delattr(base_chat_module, "_original_trace_invoke_agent_span") + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to restore functions in base chat agent module: {e}") + + try: + if hasattr(_autogen_tracing.TraceHelper, "_original_trace_block"): + original_func = getattr(_autogen_tracing.TraceHelper, "_original_trace_block") + _autogen_tracing.TraceHelper.trace_block = original_func # type: ignore[assignment] + delattr(_autogen_tracing.TraceHelper, "_original_trace_block") + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to restore TraceHelper.trace_block: {e}") + + return True + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Failed to restore autogen-core telemetry: {e}") + return False diff --git a/agentops/instrumentation/agentic/autogen/teams/__init__.py b/agentops/instrumentation/agentic/autogen/teams/__init__.py new file mode 100644 index 000000000..173af1072 --- /dev/null +++ b/agentops/instrumentation/agentic/autogen/teams/__init__.py @@ -0,0 +1,16 @@ +"""AutoGen Team Instrumentors + +This module contains instrumentation for AutoGen team and group chat operations. +Teams handle multi-agent coordination and workflows. 
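+
+Each team instrumentor yields 4-tuple wrapper descriptors of the form:
+
+    (module_path, class_name, method_name, wrapper_factory)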
diff --git a/agentops/instrumentation/agentic/autogen/teams/__init__.py b/agentops/instrumentation/agentic/autogen/teams/__init__.py
new file mode 100644
index 000000000..173af1072
--- /dev/null
+++ b/agentops/instrumentation/agentic/autogen/teams/__init__.py
@@ -0,0 +1,16 @@
+"""AutoGen Team Instrumentors
+
+This module contains instrumentation for AutoGen team and group chat operations.
+Teams handle multi-agent coordination and workflows.
+"""
+
+from .round_robin_group_chat import RoundRobinGroupChatInstrumentor
+from .selector_group_chat import SelectorGroupChatInstrumentor
+from .swarm import SwarmInstrumentor
+
+
+__all__ = [
+    "RoundRobinGroupChatInstrumentor",
+    "SelectorGroupChatInstrumentor",
+    "SwarmInstrumentor",
+]
diff --git a/agentops/instrumentation/agentic/autogen/teams/round_robin_group_chat.py b/agentops/instrumentation/agentic/autogen/teams/round_robin_group_chat.py
new file mode 100644
index 000000000..7d2c43acc
--- /dev/null
+++ b/agentops/instrumentation/agentic/autogen/teams/round_robin_group_chat.py
@@ -0,0 +1,121 @@
+"""RoundRobinGroupChat Instrumentor for AutoGen
+
+This module provides instrumentation specifically for RoundRobinGroupChat,
+which handles round-robin multi-agent conversations.
+"""
+
+import logging
+
+from agentops.instrumentation.common import SpanAttributeManager
+from agentops.semconv.span_attributes import SpanAttributes
+from ..utils.common import (
+    AutoGenSpanManager,
+    safe_str,
+    safe_extract_content,
+)
+from inspect import iscoroutine
+from opentelemetry.trace import set_span_in_context
+from opentelemetry import context as context_api
+
+logger = logging.getLogger(__name__)
+
+
+class RoundRobinGroupChatInstrumentor:
+    """Instrumentor for RoundRobinGroupChat operations."""
+
+    def __init__(self, tracer, attribute_manager: SpanAttributeManager):
+        self.tracer = tracer
+        self.attribute_manager = attribute_manager
+        self.span_manager = AutoGenSpanManager(tracer, attribute_manager)
+
+    def get_wrappers(self):
+        """Get the list of methods to wrap for RoundRobinGroupChat and the base group chat manager.
+
+        Returns a list of tuples describing what to wrap. The tuple structure is:
+            (module_path, class_name, method_name, wrapper_factory)
+
+        module_path     – fully-qualified Python module that contains the class
+        class_name      – the class that owns the method to wrap
+        method_name     – the method on that class
+        wrapper_factory – a zero-argument callable returning the actual wrapper function
+        """
+
+        base_module = "autogen_agentchat.teams._group_chat._base_group_chat_manager"
+
+        return [
+            # Wrap the method that transitions control to the next agent(s).
+            # This represents an *agent*-level span because it orchestrates agent execution.
+            (
+                base_module,
+                "BaseGroupChatManager",
+                "_transition_to_next_speakers",
+                lambda: self._transition_wrapper(),
+            ),
+        ]
+
+    def _transition_wrapper(self):
+        """Create a wrapper for `_transition_to_next_speakers` that emits an *agent*-level span."""
+
+        def wrapper(wrapped, instance, args, kwargs):
+            agent_name = getattr(instance, "_name", "group_chat")
+            # Attempt to extract the current task / prompt text for naming
+            task_text = None
+            if args and isinstance(args[0], str):
+                task_text = args[0].strip()
+
+            if not task_text:
+                task_text = getattr(instance, "_current_task", None)
+
+            span_name_base = task_text if task_text else agent_name
+            span_name = f"{span_name_base}.task" if not str(span_name_base).endswith(".task") else span_name_base
+
+            # Start the span manually so we can end it once the async work completes
+            span = self.tracer.start_span(span_name)
+            token = context_api.attach(set_span_in_context(span))
+
+            # Set attributes immediately
+            span.set_attribute("gen_ai.system", "autogen")
+            span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, "workflow.step")
+            span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "task")
+
+            if task_text:
+                span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_INPUT, safe_str(task_text, 500))
+
+            self.span_manager.set_base_attributes(span, agent_name, "task")
+
+            try:
+                result = wrapped(*args, **kwargs)
+            except Exception:
+                # End the span and detach the context if the wrapped call itself
+                # raises, so a failed transition does not leak an open span.
+                span.end()
+                context_api.detach(token)
+                raise
+
+            if iscoroutine(result):
+
+                async def instrumented():
+                    try:
+                        output = await result
+                        # Capture the output (best-effort)
+                        try:
+                            content = safe_extract_content(output)
+                            if content:
+                                span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_OUTPUT, safe_str(content, 500))
+                        except Exception:
+                            pass
+                        return output
+                    finally:
+                        span.end()
+                        context_api.detach(token)
+
+                return instrumented()
+
+            # Synchronous path
+            try:
+                content = safe_extract_content(result)
+                if content:
+                    span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_OUTPUT, safe_str(content, 500))
+            except Exception:
+                pass
+
+            span.end()
+            context_api.detach(token)
+
+            return result
+
+        return wrapper
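For context, the `(module_path, class_name, method_name, wrapper_factory)` descriptors above use the `(wrapped, instance, args, kwargs)` calling convention of the `wrapt` library. The instrumentor that consumes them is not part of this hunk, so the driver below is only an illustrative sketch, with the `apply_wrappers` name invented for the example:

```python
# Illustrative only -- `apply_wrappers` is a hypothetical helper, not part of this PR.
from wrapt import wrap_function_wrapper


def apply_wrappers(instrumentor) -> None:
    """Apply every (module_path, class_name, method_name, wrapper_factory) descriptor."""
    for module_path, class_name, method_name, factory in instrumentor.get_wrappers():
        # wrapt resolves "Class.method" inside the target module and installs the
        # wrapper so it receives (wrapped, instance, args, kwargs) on every call.
        wrap_function_wrapper(module_path, f"{class_name}.{method_name}", factory())
```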
diff --git a/agentops/instrumentation/agentic/autogen/teams/selector_group_chat.py b/agentops/instrumentation/agentic/autogen/teams/selector_group_chat.py
new file mode 100644
index 000000000..d799551fc
--- /dev/null
+++ b/agentops/instrumentation/agentic/autogen/teams/selector_group_chat.py
@@ -0,0 +1,77 @@
+"""SelectorGroupChat Instrumentor for AutoGen
+
+This module provides instrumentation specifically for SelectorGroupChat.
+"""
+
+import logging
+
+from opentelemetry.trace import SpanKind
+
+from agentops.instrumentation.common import SpanAttributeManager
+from agentops.semconv.span_attributes import SpanAttributes
+from agentops.semconv.span_kinds import AgentOpsSpanKindValues
+
+from ..utils.common import AutoGenSpanManager
+
+
+logger = logging.getLogger(__name__)
+
+
+class SelectorGroupChatInstrumentor:
+    """Instrumentor for SelectorGroupChat operations."""
+
+    def __init__(self, tracer, attribute_manager: SpanAttributeManager):
+        self.tracer = tracer
+        self.attribute_manager = attribute_manager
+        self.span_manager = AutoGenSpanManager(tracer, attribute_manager)
+
+    def get_wrappers(self):
+        """Return wrapper descriptors for AutoGen `SelectorGroupChat`.
+
+        Each descriptor is a 4-tuple:
+            (module_path, class_name, method_name, wrapper_factory)
+        """
+
+        module_path = "autogen_agentchat.teams._group_chat._selector_group_chat"
+
+        return [
+            (
+                module_path,
+                "SelectorGroupChat",
+                "__init__",
+                lambda: self._init_wrapper(),
+            ),
+        ]
+
+    def _init_wrapper(self):
+        """Wrap `SelectorGroupChat.__init__` with a synchronous workflow span."""
+
+        def wrapper(wrapped, instance, args, kwargs):
+            # Attempt to extract participants list from positional / keyword args
+            participants = []
+            if len(args) > 0 and isinstance(args[0], list):
+                participants = args[0]
+            elif "participants" in kwargs and isinstance(kwargs["participants"], list):
+                participants = kwargs["participants"]
+
+            participant_names = []
+            try:
+                participant_names = [p.name for p in participants if hasattr(p, "name")]
+            except Exception:
+                pass
+
+            names_fragment = ",".join(participant_names) if participant_names else "selector"
+            span_name = f"{names_fragment}.workflow"
+
+            with self.tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+                span.set_attribute("gen_ai.system", "autogen")
+                span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.WORKFLOW.value)
+                span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "workflow")
+
+                if participant_names:
+                    span.set_attribute("autogen.participants", ", ".join(participant_names))
+
+                # Continue with original constructor
+                return wrapped(*args, **kwargs)
+
+        return wrapper
diff --git a/agentops/instrumentation/agentic/autogen/teams/swarm.py b/agentops/instrumentation/agentic/autogen/teams/swarm.py
new file mode 100644
index 000000000..e9c0f2f8e
--- /dev/null
+++ b/agentops/instrumentation/agentic/autogen/teams/swarm.py
@@ -0,0 +1,82 @@
+"""Swarm Instrumentor for AutoGen
+
+This module provides instrumentation specifically for Swarm.
+"""
+
+import logging
+
+from opentelemetry.trace import SpanKind
+
+from agentops.instrumentation.common import SpanAttributeManager
+from agentops.semconv.span_attributes import SpanAttributes
+from agentops.semconv.span_kinds import AgentOpsSpanKindValues
+
+from ..utils.common import AutoGenSpanManager
+
+
+logger = logging.getLogger(__name__)
+
+
+class SwarmInstrumentor:
+    """Instrumentor for Swarm operations."""
+
+    def __init__(self, tracer, attribute_manager: SpanAttributeManager):
+        self.tracer = tracer
+        self.attribute_manager = attribute_manager
+        self.span_manager = AutoGenSpanManager(tracer, attribute_manager)
+
+    def get_wrappers(self):
+        """Return wrapper descriptors to patch the AutoGen Swarm team.
+
+        Format: (module_path, class_name, method_name, wrapper_factory)
+        """
+
+        module_path = "autogen_agentchat.teams._group_chat._swarm_group_chat"
+
+        return [
+            (
+                module_path,
+                "Swarm",
+                "__init__",
+                lambda: self._init_wrapper(),
+            ),
+        ]
+
+    def _init_wrapper(self):
+        """Wrap ``Swarm.__init__`` to create a top-level *workflow* span.
+
+        We treat construction of a Swarm team as the beginning of a workflow. This span
+        is **synchronous** – it starts before the team is initialised and ends right after.
+        """
+
+        def wrapper(wrapped, instance, args, kwargs):
+            # Extract participant names (best-effort).
+ participants = [] + if len(args) > 0 and isinstance(args[0], list): + participants = args[0] + elif "participants" in kwargs and isinstance(kwargs["participants"], list): + participants = kwargs["participants"] + + participant_names = [] + try: + participant_names = [p.name for p in participants if hasattr(p, "name")] + except Exception: + pass + + # Build span name – e.g., "swarm.Alice,Bob.workflow" + names_fragment = ",".join(participant_names) if participant_names else "swarm" + span_name = f"{names_fragment}.workflow" + + with self.tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span: + # Standard attributes + span.set_attribute("gen_ai.system", "autogen") + span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.WORKFLOW.value) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "workflow") + + if participant_names: + span.set_attribute("autogen.participants", ", ".join(participant_names)) + + # Delegate to original __init__ + return wrapped(*args, **kwargs) + + return wrapper diff --git a/agentops/instrumentation/agentic/autogen/utils/__init__.py b/agentops/instrumentation/agentic/autogen/utils/__init__.py new file mode 100644 index 000000000..a6557931e --- /dev/null +++ b/agentops/instrumentation/agentic/autogen/utils/__init__.py @@ -0,0 +1,25 @@ +"""AutoGen Instrumentation Utilities + +This module contains shared utilities and common functionality used across +all AutoGen agent instrumentors. +""" + +from .common import ( + AutoGenSpanManager, + extract_agent_attributes, + safe_str, + safe_extract_content, + create_agent_span, + instrument_async_generator, + instrument_coroutine, +) + +__all__ = [ + "AutoGenSpanManager", + "extract_agent_attributes", + "safe_str", + "safe_extract_content", + "create_agent_span", + "instrument_async_generator", + "instrument_coroutine", +] diff --git a/agentops/instrumentation/agentic/autogen/utils/common.py b/agentops/instrumentation/agentic/autogen/utils/common.py new file mode 100644 index 000000000..32c0f30d8 --- /dev/null +++ b/agentops/instrumentation/agentic/autogen/utils/common.py @@ -0,0 +1,175 @@ +"""Common utilities for AutoGen instrumentation""" + +import logging +from typing import Any, Dict, Optional, AsyncGenerator, Awaitable +from opentelemetry.trace import SpanKind, Status, StatusCode, Span + +from agentops.instrumentation.common import ( + SpanAttributeManager, + create_span, +) +from agentops.semconv.agent import AgentAttributes +from agentops.semconv.span_attributes import SpanAttributes +from agentops.semconv.span_kinds import AgentOpsSpanKindValues + +logger = logging.getLogger(__name__) + + +class AutoGenSpanManager: + """Manages spans for AutoGen agent operations.""" + + def __init__(self, tracer, attribute_manager: SpanAttributeManager): + self.tracer = tracer + self.attribute_manager = attribute_manager + + def create_agent_span(self, agent_name: str, operation: str = "agent"): + """Create a span for an agent operation.""" + if not self.tracer: + return None + + span_name = f"{operation}.{agent_name}.workflow" + span_context = create_span( + self.tracer, span_name, kind=SpanKind.CLIENT, attribute_manager=self.attribute_manager + ) + return span_context + + def set_base_attributes(self, span: Span, agent_name: str, operation: str): + """Set base attributes common to all AutoGen operations.""" + span.set_attribute("gen_ai.operation.name", operation) + span.set_attribute("gen_ai.system", "autogen") + span.set_attribute("gen_ai.agent.name", agent_name) + 
span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.AGENT.value) + span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "agent") + span.set_attribute(AgentAttributes.AGENT_NAME, agent_name) + + +def extract_agent_attributes(instance, args, kwargs) -> Dict[str, Any]: + """Extract agent attributes from instance, args, and kwargs.""" + attributes = {} + + # Get name from kwargs or args (different signatures for different agents) + name = kwargs.get("name") or (args[0] if args else "unnamed_agent") + attributes["name"] = name + + # Determine agent type from class name + agent_type = instance.__class__.__name__ if hasattr(instance, "__class__") else "Agent" + attributes["type"] = agent_type + + # Get description from different possible sources + description = kwargs.get("description") or kwargs.get("system_message") or (args[1] if len(args) > 1 else "") + if description: + attributes["description"] = safe_str(description) + + return attributes + + +def safe_str(value: Any, max_length: Optional[int] = None) -> str: + """Safely convert *value* to a string.""" + + try: + if value is None: + return "" + + str_val = str(value) + + if max_length is not None and max_length > 0 and len(str_val) > max_length: + return str_val[:max_length] + "..." + + return str_val + except Exception: + return "" + + +def safe_extract_content(obj: Any, content_attr: str = "content") -> str: + """Safely extract content from an object.""" + try: + if hasattr(obj, content_attr): + content = getattr(obj, content_attr) + return safe_str(content) + elif hasattr(obj, "__dict__"): + # Try to extract from dict + content = obj.__dict__.get(content_attr, "") + return safe_str(content) + except Exception as e: + logger.debug(f"Error extracting content: {e}") + return "" + + +def create_agent_span(tracer, agent_name: str, operation: str, attribute_manager: SpanAttributeManager): + """Create a span for agent operations with standard attributes.""" + if not tracer: + logger.debug("[AutoGen DEBUG] No tracer available, skipping span creation") + return None + + span_name = f"{operation} {agent_name}.workflow" + span_context = create_span(tracer, span_name, kind=SpanKind.CLIENT, attribute_manager=attribute_manager) + return span_context + + +async def instrument_async_generator(generator: AsyncGenerator, span: Span, operation: str): + """Instrument an async generator with span tracking.""" + item_count = 0 + event_types = set() + total_tokens = 0 + total_prompt_tokens = 0 + total_completion_tokens = 0 + + try: + async for item in generator: + item_count += 1 + item_type = type(item).__name__ + event_types.add(item_type) + + # Track token usage if available + if hasattr(item, "usage") and item.usage: + if hasattr(item.usage, "prompt_tokens"): + total_prompt_tokens += item.usage.prompt_tokens or 0 + if hasattr(item.usage, "completion_tokens"): + total_completion_tokens += item.usage.completion_tokens or 0 + if hasattr(item.usage, "total_tokens"): + total_tokens += item.usage.total_tokens or 0 + + yield item + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Error in instrumented generator: {e}") + span.set_status(Status(StatusCode.ERROR, str(e))) + raise + finally: + # Set final span attributes + span.set_attribute(f"gen_ai.{operation}.item_count", item_count) + span.set_attribute(f"gen_ai.{operation}.event_types", ", ".join(event_types)) + + if total_tokens > 0: + span.set_attribute("gen_ai.usage.prompt_tokens", total_prompt_tokens) + span.set_attribute("gen_ai.usage.completion_tokens", 
total_completion_tokens) + span.set_attribute("gen_ai.usage.total_tokens", total_tokens) + + +async def instrument_coroutine(coro: Awaitable, span: Span, operation: str): + """Instrument a coroutine with span tracking.""" + try: + result = await coro + + # Track result attributes + if hasattr(result, "__dict__"): + if hasattr(result, "chat_message"): + content = safe_extract_content(result.chat_message) + if content: + span.set_attribute(f"gen_ai.{operation}.response_content", safe_str(content, 500)) + + # Track usage if available + if hasattr(result, "usage") and result.usage: + if hasattr(result.usage, "prompt_tokens"): + span.set_attribute("gen_ai.usage.prompt_tokens", result.usage.prompt_tokens or 0) + if hasattr(result.usage, "completion_tokens"): + span.set_attribute("gen_ai.usage.completion_tokens", result.usage.completion_tokens or 0) + if hasattr(result.usage, "total_tokens"): + span.set_attribute("gen_ai.usage.total_tokens", result.usage.total_tokens or 0) + + return result + + except Exception as e: + logger.debug(f"[AutoGen DEBUG] Error in {operation}: {e}") + span.set_status(Status(StatusCode.ERROR, str(e))) + raise diff --git a/docs/v2/examples/autogen.mdx b/docs/v2/examples/autogen.mdx index 2d6b128b1..c8b679864 100644 --- a/docs/v2/examples/autogen.mdx +++ b/docs/v2/examples/autogen.mdx @@ -1,147 +1,158 @@ --- -title: 'AutoGen' -description: 'Microsoft Autogen Chat Example' +title: 'Autogen' +description: 'Microsoft Autogen Assistant Chat Example' --- {/* SOURCE_FILE: examples/autogen/AgentChat.ipynb */} _View Notebook on Github_ -# Microsoft Autogen Chat Example +# Microsoft Autogen Assistant Chat Example -AgentOps automatically configures itself when it's initialized meaning your agent run data will be tracked and logged to your AgentOps dashboard right away. +## Overview +This script demonstrates how to build an interactive AI assistant chat using Microsoft Autogen and AgentOps. +You will see how to: -First let's install the required packages + 1. Initialize an assistant agent with OpenAI's GPT-4 model + 2. Equip the agent with a calculator tool for step-by-step math problem solving + 3. Track and log all agent interactions automatically with AgentOps + 4. Simulate a conversation loop where the assistant and user exchange messages until the task is complete +This approach is useful for building conversational AI agents that can reason through complex tasks, show their work, and provide transparent, auditable results. -## Installation +## First let's install the required packages +### Installation ```bash pip - pip install "autogen-ext[openai]" -U agentops autogen-agentchat python-dotenv + pip install "autogen-ext[openai]" -U "ag2[autogen-agentchat]" agentops python-dotenv nest_asyncio ``` ```bash poetry - poetry add "autogen-ext[openai]" -U agentops autogen-agentchat python-dotenv + poetry add "autogen-ext[openai]" -U "ag2[autogen-agentchat]" agentops python-dotenv nest_asyncio ``` ```bash uv - uv pip install "autogen-ext[openai]" -U agentops autogen-agentchat python-dotenv + uv add "autogen-ext[openai]" -U "ag2[autogen-agentchat]" agentops python-dotenv nest_asyncio ``` -Then import them + +Next, we'll set our API keys. There are several ways to do this, the code below is just the most foolproof way for the purposes of this notebook. It accounts for both users who use environment variables and those who just want to set the API Key here in this notebook. + +[Get an AgentOps API key](https://agentops.ai/settings/projects) + +1. 
Create an environment variable in a .env file or other method. By default, the AgentOps `init()` function will look for an environment variable named `AGENTOPS_API_KEY`. Or... + +2. Replace `` below and pass in the optional `api_key` parameter to the AgentOps `init(api_key=...)` function. Remember not to commit your API key to a public repo! ```python import os +import asyncio from dotenv import load_dotenv from IPython.core.error import ( StdinNotImplementedError, ) -import asyncio - +from typing import Annotated, Literal import agentops - +import nest_asyncio from autogen_agentchat.agents import AssistantAgent, UserProxyAgent from autogen_ext.models.openai import OpenAIChatCompletionClient +from autogen_agentchat.messages import TextMessage +from autogen_core import CancellationToken -from autogen_agentchat.teams import RoundRobinGroupChat -from autogen_agentchat.conditions import MaxMessageTermination -from autogen_agentchat.ui import Console -``` - -Next, we'll set our API keys. There are several ways to do this, the code below is just the most foolproof way for the purposes of this notebook. It accounts for both users who use environment variables and those who just want to set the API Key here in this notebook. - -[Get an AgentOps API key](https://agentops.ai/settings/projects) - -1. Create an environment variable in a .env file or other method. By default, the AgentOps `init()` function will look for an environment variable named `AGENTOPS_API_KEY`. Or... - -2. Replace `` below and pass in the optional `api_key` parameter to the AgentOps `init(api_key=...)` function. Remember not to commit your API key to a public repo! - - -```python load_dotenv() os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY", "your_api_key_here") os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "your_openai_api_key_here") -``` +# Ensure API key is available +openai_api_key = os.getenv("OPENAI_API_KEY") +if not openai_api_key: + raise ValueError("OPENAI_API_KEY environment variable is required") + +# Create the model client +model_client = OpenAIChatCompletionClient(model="gpt-4-turbo", api_key=openai_api_key, seed=42, temperature=0) -```python # When initializing AgentOps, you can pass in optional tags to help filter sessions -agentops.init(auto_start_session=False) -tracer = agentops.start_trace( - trace_name="Microsoft Agent Chat Example", tags=["autogen-chat", "microsoft-autogen", "agentops-example"] +agentops.init(trace_name="autogen-agent-chat", tags=["autogen-agent-chat", "agentops-example"]) + +# Define a simple calculator tool the assistant can use +Operator = Literal["+", "-", "*", "/"] +def calculator(a: int, b: int, operator: Annotated[Operator, "operator"]) -> int: + if operator == "+": + return a + b + elif operator == "-": + return a - b + elif operator == "*": + return a * b + elif operator == "/": + return int(a / b) + else: + raise ValueError("Invalid operator") + +# Create the assistant agent with access to the calculator tool +assistant = AssistantAgent( + name="Assistant", + system_message="You are a helpful AI assistant with access to a calculator tool. " + "When given a mathematical expression, you must break it down and use the calculator tool " + "for ALL arithmetic operations until you reach the final answer. " + "Do not stop until you have calculated the complete final result. " + "Show your work step by step and provide the final numerical answer. 
" + "Only say 'TERMINATE' after you have completed the entire calculation.", + model_client=model_client, + tools=[calculator], + reflect_on_tool_use=True, ) -``` -AutoGen will now start automatically tracking +# Create a user proxy agent to represent the human user +user_proxy = UserProxyAgent( + name="User", + description="A user proxy agent that represents the human user.", +) -* LLM prompts and completions -* Token usage and costs -* Agent names and actions -* Correspondence between agents -* Tool usage -* Errors +# Main async function to run the chat loop +async def main(): + # The initial question for the assistant to solve + initial_message = "What is (1423 - 123) / 3 + (32 + 23) * 5? Please complete the entire calculation step by step and show the final answer." + print(f"User: {initial_message}") -# Simple Chat Example + # Start the conversation history with the user's question + conversation_history = [TextMessage(content=initial_message, source="user")] + max_turns = 10 # Prevent infinite loops + turn = 0 -```python -# Define model and API key -model_name = "gpt-4-turbo" # Or "gpt-4o" / "gpt-4o-mini" as per migration guide examples -api_key = os.getenv("OPENAI_API_KEY") + # Conversation loop: user and assistant take turns + while turn < max_turns: + turn += 1 + print(f"\n--- Turn {turn} ---") -# Create the model client -model_client = OpenAIChatCompletionClient(model=model_name, api_key=api_key) + # Send the conversation so far to the assistant and get a response + response = await assistant.on_messages(conversation_history, CancellationToken()) -# Create the agent that uses the LLM. -assistant = AssistantAgent( - name="assistant", - system_message="You are a helpful assistant.", # Added system message for clarity - model_client=model_client, -) + if response.chat_message: + assistant_message = response.chat_message + print(f"Assistant: {assistant_message.to_text()}") -user_proxy_initiator = UserProxyAgent("user_initiator") + # Check if the assistant has finished the calculation + message_text = assistant_message.to_text() + if "TERMINATE" in message_text or "final answer" in message_text.lower(): + print("Calculation completed!") + break + # Add assistant's message to the conversation for the next turn + conversation_history.append(TextMessage(content=message_text, source="assistant")) -async def main(): - termination = MaxMessageTermination(max_messages=2) - - group_chat = RoundRobinGroupChat( - [user_proxy_initiator, assistant], # Corrected: agents as positional argument - termination_condition=termination, - ) - - chat_task = "How can I help you today?" 
- print(f"User Initiator: {chat_task}") - - try: - stream = group_chat.run_stream(task=chat_task) - await Console().run(stream) - agentops.end_trace(tracer, end_state="Success") - - except StdinNotImplementedError: - print("StdinNotImplementedError: This typically happens in non-interactive environments.") - print("Skipping interactive part of chat for automation.") - agentops.end_trace(tracer, end_state="Indeterminate") - except Exception as e: - print(f"An error occurred: {e}") - agentops.end_trace(tracer, end_state="Error") - finally: - await model_client.close() - - -if __name__ == "__main__": - try: - loop = asyncio.get_running_loop() - except RuntimeError: - loop = None - - if loop and loop.is_running(): - import nest_asyncio - - nest_asyncio.apply() - asyncio.run(main()) - else: - asyncio.run(main()) + # If not done, prompt the assistant to continue + if turn < max_turns - 1: + continue_prompt = "Please continue with the remaining calculations to get the final answer." + conversation_history.append(TextMessage(content=continue_prompt, source="user")) + print(f"User: {continue_prompt}") + + else: + print("Assistant did not provide a response.") + break + +nest_asyncio.apply() +asyncio.run(main()) ``` You can view data on this run at [app.agentops.ai](https://app.agentops.ai). diff --git a/docs/v2/integrations/autogen.mdx b/docs/v2/integrations/autogen.mdx index 9ca346292..19a6b770a 100644 --- a/docs/v2/integrations/autogen.mdx +++ b/docs/v2/integrations/autogen.mdx @@ -57,358 +57,103 @@ os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY") AgentOps automatically instruments AutoGen agents and tracks their interactions. Simply initialize AgentOps before creating your AutoGen agents! -```python Countdown +```python +# Then import them +from typing import Any, Dict, List import asyncio -from dataclasses import dataclass -from typing import Callable +from autogen_agentchat.agents import AssistantAgent +from autogen_agentchat.conditions import HandoffTermination, TextMentionTermination +from autogen_agentchat.messages import HandoffMessage +from autogen_agentchat.teams import Swarm +from autogen_agentchat.ui import Console +from autogen_ext.models.openai import OpenAIChatCompletionClient +import os import agentops +from dotenv import load_dotenv +import nest_asyncio -from autogen_core import ( - DefaultTopicId, - MessageContext, - RoutedAgent, - default_subscription, - message_handler, - AgentId, - SingleThreadedAgentRuntime +# Load environment variables (like API keys) +load_dotenv() +# Set up AgentOps to track everything that happens in this session +agentops.init(auto_start_session=False, tags=["autogen-swarm-team", "agentops-example"]) +tracer = agentops.start_trace(trace_name="autogen-swarm-team") + +# This is a pretend tool that "refunds" a flight when given a flight ID. 
+def refund_flight(flight_id: str) -> str: + """Refund a flight""" + return f"Flight {flight_id} refunded" + +# Set up the AI model client (the brain for the agents) +model_client = OpenAIChatCompletionClient( + model="gpt-4o", + api_key=os.getenv("OPENAI_API_KEY", "your_openai_api_key_here"), ) -# Initialize AgentOps -agentops.init() - -@dataclass -class CountdownMessage: - """Message containing a number for countdown operations""" - content: int - -@default_subscription -class ModifierAgent(RoutedAgent): - """Agent that modifies numbers by applying a transformation function""" - - def __init__(self, modify_val: Callable[[int], int]) -> None: - super().__init__("A modifier agent that transforms numbers.") - self._modify_val = modify_val - - @message_handler - async def handle_message(self, message: CountdownMessage, ctx: MessageContext) -> None: - """Handle incoming messages and apply modification""" - original_val = message.content - modified_val = self._modify_val(original_val) - - print(f"šŸ”§ ModifierAgent: Transformed {original_val} → {modified_val}") - - # Publish the modified value to continue the workflow - await self.publish_message( - CountdownMessage(content=modified_val), - DefaultTopicId() - ) - -@default_subscription -class CheckerAgent(RoutedAgent): - """Agent that checks if a condition is met and decides whether to continue""" - - def __init__(self, stop_condition: Callable[[int], bool]) -> None: - super().__init__("A checker agent that validates conditions.") - self._stop_condition = stop_condition - - @message_handler - async def handle_message(self, message: CountdownMessage, ctx: MessageContext) -> None: - """Handle incoming messages and check stopping condition""" - value = message.content - - if not self._stop_condition(value): - print(f"āœ… CheckerAgent: {value} passed validation, continuing workflow") - # Continue the workflow by publishing the message - await self.publish_message( - CountdownMessage(content=value), - DefaultTopicId() - ) - else: - print(f"šŸ›‘ CheckerAgent: {value} failed validation, stopping workflow") - print("šŸŽ‰ Countdown completed successfully!") - -async def run_countdown_workflow(): - """Run a countdown workflow from 10 to 1 using AutoGen agents""" - - print("šŸš€ Starting AutoGen Countdown Workflow") - print("=" * 50) - - # Create the AutoGen runtime - runtime = SingleThreadedAgentRuntime() - - # Register the modifier agent (subtracts 1 from each number) - await ModifierAgent.register( - runtime, - "modifier", - lambda: ModifierAgent(modify_val=lambda x: x - 1), - ) - - # Register the checker agent (stops when value <= 1) - await CheckerAgent.register( - runtime, - "checker", - lambda: CheckerAgent(stop_condition=lambda x: x <= 1), - ) - - # Start the runtime - runtime.start() - print("šŸ¤– AutoGen runtime started") - print("šŸ“Ø Sending initial message with value: 10") - - # Send initial message to start the countdown - await runtime.send_message( - CountdownMessage(10), - AgentId("checker", "default") - ) - - # Wait for the workflow to complete - await runtime.stop_when_idle() - - print("=" * 50) - print("✨ Workflow completed! 
Check your AgentOps dashboard for detailed traces.")
-
-# Run the workflow
-if __name__ == "__main__":
-    asyncio.run(run_countdown_workflow())
-```
-
-```python Multi-Agent
-import asyncio
-from dataclasses import dataclass
-from typing import List, Dict, Any
-import agentops
-
-from autogen_core import (
-    DefaultTopicId,
-    MessageContext,
-    RoutedAgent,
-    default_subscription,
-    message_handler,
-    AgentId,
-    SingleThreadedAgentRuntime
+# The travel agent helps with travel tasks and can hand off to the flights_refunder agent.
+travel_agent = AssistantAgent(
+    "travel_agent",
+    model_client=model_client,
+    handoffs=["flights_refunder", "user"],
+    system_message="""You are a travel agent.
+    The flights_refunder is in charge of refunding flights.
+    If you need information from the user, you must first send your message, then you can handoff to the user.
+    Use TERMINATE when the travel planning is complete.""",
 )
-
-# Initialize AgentOps
-
-@dataclass
-class DataMessage:
-    """Message containing data to be processed"""
-    data: List[Dict[str, Any]]
-    stage: str
-    metadata: Dict[str, Any]
-
-@default_subscription
-class DataCollectorAgent(RoutedAgent):
-    """Agent responsible for collecting and preparing initial data"""
-
-    def __init__(self) -> None:
-        super().__init__("Data collector agent that gathers initial dataset.")
-
-    @message_handler
-    async def handle_message(self, message: DataMessage, ctx: MessageContext) -> None:
-        print(f"šŸ“Š DataCollector: Collecting data for {message.metadata.get('source', 'unknown')}")
-
-        # Simulate data collection
-        collected_data = [
-            {"id": 1, "value": 100, "category": "A"},
-            {"id": 2, "value": 200, "category": "B"},
-            {"id": 3, "value": 150, "category": "A"},
-            {"id": 4, "value": 300, "category": "C"},
-        ]
-
-        print(f"āœ… DataCollector: Collected {len(collected_data)} records")
-
-        # Send to processor
-        await self.publish_message(
-            DataMessage(
-                data=collected_data,
-                stage="processing",
-                metadata={**message.metadata, "collected_count": len(collected_data)}
-            ),
-            DefaultTopicId()
-        )
-
-@default_subscription
-class DataProcessorAgent(RoutedAgent):
-    """Agent that processes and transforms data"""
-
-    def __init__(self) -> None:
-        super().__init__("Data processor agent that transforms collected data.")
-
-    @message_handler
-    async def handle_message(self, message: DataMessage, ctx: MessageContext) -> None:
-        if message.stage != "processing":
-            return
-
-        print(f"āš™ļø DataProcessor: Processing {len(message.data)} records")
-
-        # Process data - add calculated fields
-        processed_data = []
-        for item in message.data:
-            processed_item = {
-                **item,
-                "processed_value": item["value"] * 1.1,  # 10% increase
-                "status": "processed"
-            }
-            processed_data.append(processed_item)
-
-        print(f"āœ… DataProcessor: Processed {len(processed_data)} records")
-
-        # Send to analyzer
-        await self.publish_message(
-            DataMessage(
-                data=processed_data,
-                stage="analysis",
-                metadata={**message.metadata, "processed_count": len(processed_data)}
-            ),
-            DefaultTopicId()
-        )
-
-@default_subscription
-class DataAnalyzerAgent(RoutedAgent):
-    """Agent that analyzes processed data and generates insights"""
-
-    def __init__(self) -> None:
-        super().__init__("Data analyzer agent that generates insights.")
+# The flights_refunder agent specializes in refunding flights and can use the refund_flight tool.
+flights_refunder = AssistantAgent( + "flights_refunder", + model_client=model_client, + handoffs=["travel_agent", "user"], + tools=[refund_flight], + system_message="""You are an agent specialized in refunding flights. + You only need flight reference numbers to refund a flight. + You have the ability to refund a flight using the refund_flight tool. + If you need information from the user, you must first send your message, then you can handoff to the user. + When the transaction is complete, handoff to the travel agent to finalize.""", +) - @message_handler - async def handle_message(self, message: DataMessage, ctx: MessageContext) -> None: - if message.stage != "analysis": - return - - print(f"🧠 DataAnalyzer: Analyzing {len(message.data)} records") - - # Perform analysis - total_value = sum(item["processed_value"] for item in message.data) - avg_value = total_value / len(message.data) - categories = set(item["category"] for item in message.data) - - analysis_results = { - "total_records": len(message.data), - "total_value": total_value, - "average_value": avg_value, - "unique_categories": len(categories), - "categories": list(categories) - } - - print(f"šŸ“ˆ DataAnalyzer: Analysis complete") - print(f" • Total records: {analysis_results['total_records']}") - print(f" • Average value: {analysis_results['average_value']:.2f}") - print(f" • Categories: {', '.join(analysis_results['categories'])}") - - # Send to reporter - await self.publish_message( - DataMessage( - data=message.data, - stage="reporting", - metadata={ - **message.metadata, - "analysis": analysis_results - } - ), - DefaultTopicId() +# These rules decide when the conversation should stop: +# - If the user is handed the conversation (handoff to user), or +# - If someone says 'TERMINATE' in the chat +termination = HandoffTermination(target="user") | TextMentionTermination("TERMINATE") +# Put both agents together into a "Swarm" team so they can work together. +team = Swarm([travel_agent, flights_refunder], termination_condition=termination) +# This is the task the user wants help with. +task = "I need to refund my flight." + +# This function runs the team and handles the back-and-forth with the user. +async def run_team_stream() -> None: + task_result = await Console(team.run_stream(task=task)) + last_message = task_result.messages[-1] + + # These are the user's replies, sent automatically to keep the example running. + scripted_responses = [ + "My flight reference is ABC123.", + "Yes, thank you. TERMINATE", + ] + response_index = 0 + + # Keep going as long as the agents hand the conversation to the user. 
+ while isinstance(last_message, HandoffMessage) and last_message.target == "user": + if response_index >= len(scripted_responses): + break # Stop if we run out of replies + + user_message = scripted_responses[response_index] + response_index += 1 + + task_result = await Console( + team.run_stream( + task=HandoffMessage( + source="user", target=last_message.source, content=user_message + ) + ) ) + last_message = task_result.messages[-1] -@default_subscription -class ReportGeneratorAgent(RoutedAgent): - """Agent that generates final reports""" - - def __init__(self) -> None: - super().__init__("Report generator agent that creates final output.") - - @message_handler - async def handle_message(self, message: DataMessage, ctx: MessageContext) -> None: - if message.stage != "reporting": - return - - print(f"šŸ“ ReportGenerator: Generating final report") - - analysis = message.metadata.get("analysis", {}) - - report = f""" -šŸŽÆ DATA PROCESSING REPORT -======================== -Source: {message.metadata.get('source', 'Unknown')} -Processing Date: {message.metadata.get('timestamp', 'Unknown')} - -šŸ“Š SUMMARY STATISTICS: -• Total Records Processed: {analysis.get('total_records', 0)} -• Total Value: ${analysis.get('total_value', 0):,.2f} -• Average Value: ${analysis.get('average_value', 0):,.2f} -• Unique Categories: {analysis.get('unique_categories', 0)} -• Categories Found: {', '.join(analysis.get('categories', []))} - -āœ… Processing pipeline completed successfully! - """ - - print(report) - print("šŸŽ‰ Multi-agent data processing workflow completed!") - -async def run_data_processing_pipeline(): - """Run a complete data processing pipeline using multiple AutoGen agents""" - - print("šŸš€ Starting AutoGen Data Processing Pipeline") - print("=" * 60) - - # Create runtime - runtime = SingleThreadedAgentRuntime() - - # Register all agents - await DataCollectorAgent.register( - runtime, - "collector", - lambda: DataCollectorAgent(), - ) - - await DataProcessorAgent.register( - runtime, - "processor", - lambda: DataProcessorAgent(), - ) - - await DataAnalyzerAgent.register( - runtime, - "analyzer", - lambda: DataAnalyzerAgent(), - ) - - await ReportGeneratorAgent.register( - runtime, - "reporter", - lambda: ReportGeneratorAgent(), - ) - - # Start runtime - runtime.start() - print("šŸ¤– AutoGen runtime with 4 agents started") - - # Trigger the pipeline - initial_message = DataMessage( - data=[], - stage="collection", - metadata={ - "source": "customer_database", - "timestamp": "2024-01-15T10:30:00Z", - "pipeline_id": "data_proc_001" - } - ) - - print("šŸ“Ø Triggering data processing pipeline...") - await runtime.send_message( - initial_message, - AgentId("collector", "default") - ) - - # Wait for completion - await runtime.stop_when_idle() - - print("=" * 60) - print("✨ Pipeline completed! Check AgentOps dashboard for detailed agent traces.") - -# Run the pipeline -if __name__ == "__main__": - asyncio.run(run_data_processing_pipeline()) +# Start the team and let the agents and user work together to solve the problem. +nest_asyncio.apply() +asyncio.run(run_team_stream()) ``` @@ -416,11 +161,14 @@ if __name__ == "__main__": - Basic multi-agent chat functionality + Multi-Agent chat functionality with tool calls Demonstrates an agent specialized for mathematical problem-solving. + + This GroupChat example shows how you can make several AI agents work together as a team to solve a problem. 
+ Visit your [AgentOps Dashboard](https://app.agentops.ai) to see detailed traces of your AutoGen agent interactions, performance metrics, and workflow analytics. diff --git a/examples/autogen/AgentChat.ipynb b/examples/autogen/AgentChat.ipynb index ef81f2243..fb7490fa4 100644 --- a/examples/autogen/AgentChat.ipynb +++ b/examples/autogen/AgentChat.ipynb @@ -1,226 +1,214 @@ { "cells": [ { - "attachments": {}, "cell_type": "markdown", - "id": "bb6538d8-2a5d-4a99-b2c1-7130963e4f7b", + "id": "89cbe72f", "metadata": {}, "source": [ - "# Microsoft Autogen Chat Example\n", - "\n", - "AgentOps automatically configures itself when it's initialized meaning your agent run data will be tracked and logged to your AgentOps dashboard right away." - ] - }, - { - "cell_type": "markdown", - "id": "87626697", - "metadata": {}, - "source": [ - "First let's install the required packages" + "Microsoft Autogen Assistant Chat Example\n", + "\n", + "Overview\n", + "This script demonstrates how to build an interactive AI assistant chat using Microsoft Autogen and AgentOps.\n", + "You will see how to:\n", + " - Initialize an assistant agent with OpenAI's GPT-4 model\n", + " - Equip the agent with a calculator tool for step-by-step math problem solving\n", + " - Track and log all agent interactions automatically with AgentOps\n", + " - Simulate a conversation loop where the assistant and user exchange messages until the task is complete\n", + "This approach is useful for building conversational AI agents that can reason through complex tasks, show their work, and provide transparent, auditable results.\n" ] }, { "cell_type": "code", "execution_count": null, - "id": "9599cf93", + "id": "077a64ac", "metadata": {}, "outputs": [], "source": [ - "%pip install -U autogen-agentchat\n", + "# AgentOps ensures all actions are tracked, making it easy to monitor, debug, and analyze your agent's performance in real time.\n", + "#\n", + "# First let's install the required packages\n", + "%pip install -U \"ag2[autogen-agentchat]\"\n", "%pip install -U \"autogen-ext[openai]\"\n", "%pip install -U agentops\n", - "%pip install -U python-dotenv" + "%pip install -U python-dotenv\n", + "%pip install -U nest_asyncio\n" ] }, { "cell_type": "markdown", - "id": "3506f401", - "metadata": {}, + "id": "abfad833", + "metadata": { + "lines_to_next_cell": 2 + }, "source": [ - "Then import them" + "Next, we'll set our API keys. There are several ways to do this, the code below is just the most foolproof way for the purposes of this notebook. It accounts for both users who use environment variables and those who just want to set the API Key here in this notebook.\n", + "\n", + "[Get an AgentOps API key](https://agentops.ai/settings/projects)\n", + "\n", + "1. Create an environment variable in a .env file or other method. By default, the AgentOps `init()` function will look for an environment variable named `AGENTOPS_API_KEY`. Or...\n", + "\n", + "2. Replace `` below and pass in the optional `api_key` parameter to the AgentOps `init(api_key=...)` function. Remember not to commit your API key to a public repo!" 
] }, { "cell_type": "code", "execution_count": null, - "id": "1b5c8b7b", + "id": "ef33289f", "metadata": {}, "outputs": [], "source": [ "import os\n", - "from dotenv import load_dotenv\n", - "from IPython.core.error import (\n", - " StdinNotImplementedError,\n", - ")\n", "import asyncio\n", - "\n", + "from dotenv import load_dotenv\n", + "from typing import Annotated, Literal\n", "import agentops\n", - "\n", + "import nest_asyncio\n", "from autogen_agentchat.agents import AssistantAgent, UserProxyAgent\n", "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "\n", - "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_agentchat.conditions import MaxMessageTermination\n", - "from autogen_agentchat.ui import Console" - ] - }, - { - "cell_type": "markdown", - "id": "70b502a2", - "metadata": {}, - "source": [ - "Next, we'll set our API keys. There are several ways to do this, the code below is just the most foolproof way for the purposes of this notebook. It accounts for both users who use environment variables and those who just want to set the API Key here in this notebook.\n", - "\n", - "[Get an AgentOps API key](https://agentops.ai/settings/projects)\n", - "\n", - "1. Create an environment variable in a .env file or other method. By default, the AgentOps `init()` function will look for an environment variable named `AGENTOPS_API_KEY`. Or...\n", - "\n", - "2. Replace `` below and pass in the optional `api_key` parameter to the AgentOps `init(api_key=...)` function. Remember not to commit your API key to a public repo!" + "from autogen_agentchat.messages import TextMessage\n", + "from autogen_core import CancellationToken" ] }, { "cell_type": "code", "execution_count": null, - "id": "7ae4152d", + "id": "a352fb64", "metadata": {}, "outputs": [], "source": [ "load_dotenv()\n", "os.environ[\"AGENTOPS_API_KEY\"] = os.getenv(\"AGENTOPS_API_KEY\", \"your_api_key_here\")\n", - "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\", \"your_openai_api_key_here\")" + "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\", \"your_openai_api_key_here\")\n", + "# Ensure API key is available\n", + "openai_api_key = os.getenv(\"OPENAI_API_KEY\")\n", + "if not openai_api_key:\n", + " raise ValueError(\"OPENAI_API_KEY environment variable is required\")\n", + "# Create the model client\n", + "model_client = OpenAIChatCompletionClient(model=\"gpt-4-turbo\", api_key=openai_api_key, seed=42, temperature=0)\n", + "# When initializing AgentOps, you can pass in optional tags to help filter sessions\n", + "agentops.init(trace_name=\"autogen-agent-chat\", tags=[\"autogen-agent-chat\", \"agentops-example\"])" ] }, { "cell_type": "code", "execution_count": null, - "id": "d93f2339-7b99-4cf1-9232-c24faba49c7b", - "metadata": {}, + "id": "7a2c1961", + "metadata": { + "lines_to_next_cell": 1 + }, "outputs": [], "source": [ - "# When initializing AgentOps, you can pass in optional tags to help filter sessions\n", - "agentops.init(auto_start_session=False)\n", - "tracer = agentops.start_trace(\n", - " trace_name=\"Microsoft Agent Chat Example\", tags=[\"autogen-chat\", \"microsoft-autogen\", \"agentops-example\"]\n", - ")" + "# Define a simple calculator tool the assistant can use\n", + "Operator = Literal[\"+\", \"-\", \"*\", \"/\"]\n", + "def calculator(a: int, b: int, operator: Annotated[Operator, \"operator\"]) -> int:\n", + " if operator == \"+\":\n", + " return a + b\n", + " elif operator == \"-\":\n", + " return a - b\n", + " elif operator == \"*\":\n", + " 
return a * b\n", + " elif operator == \"/\":\n", + " return int(a / b)\n", + " else:\n", + " raise ValueError(\"Invalid operator\")\n", + "# Create the assistant agent with access to the calculator tool\n", + "assistant = AssistantAgent(\n", + " name=\"Assistant\",\n", + " system_message=\"You are a helpful AI assistant with access to a calculator tool. \"\n", + " \"When given a mathematical expression, you must break it down and use the calculator tool \"\n", + " \"for ALL arithmetic operations until you reach the final answer. \"\n", + " \"Do not stop until you have calculated the complete final result. \"\n", + " \"Show your work step by step and provide the final numerical answer. \"\n", + " \"Only say 'TERMINATE' after you have completed the entire calculation.\",\n", + " model_client=model_client,\n", + " tools=[calculator],\n", + " reflect_on_tool_use=True,\n", + ")\n", + "# Create a user proxy agent to represent the human user\n", + "user_proxy = UserProxyAgent(\n", + " name=\"User\",\n", + " description=\"A user proxy agent that represents the human user.\",\n", + ")\n", + "# Main async function to run the chat loop\n", + "async def main():\n", + " # The initial question for the assistant to solve\n", + " initial_message = \"What is (1423 - 123) / 3 + (32 + 23) * 5? Please complete the entire calculation step by step and show the final answer.\"\n", + " print(f\"User: {initial_message}\")\n", + "\n", + " # Start the conversation history with the user's question\n", + " conversation_history = [TextMessage(content=initial_message, source=\"user\")]\n", + "\n", + " max_turns = 10 # Prevent infinite loops\n", + " turn = 0\n", + "\n", + " # Conversation loop: user and assistant take turns\n", + " while turn < max_turns:\n", + " turn += 1\n", + " print(f\"\\n--- Turn {turn} ---\")\n", + "\n", + " # Send the conversation so far to the assistant and get a response\n", + " response = await assistant.on_messages(conversation_history, CancellationToken())\n", + "\n", + " if response.chat_message:\n", + " assistant_message = response.chat_message\n", + " print(f\"Assistant: {assistant_message.to_text()}\")\n", + "\n", + " # Check if the assistant has finished the calculation\n", + " message_text = assistant_message.to_text()\n", + " if \"TERMINATE\" in message_text or \"final answer\" in message_text.lower():\n", + " print(\"Calculation completed!\")\n", + " break\n", + "\n", + " # Add assistant's message to the conversation for the next turn\n", + " conversation_history.append(TextMessage(content=message_text, source=\"assistant\"))\n", + "\n", + " # If not done, prompt the assistant to continue\n", + " if turn < max_turns - 1:\n", + " continue_prompt = \"Please continue with the remaining calculations to get the final answer.\"\n", + " conversation_history.append(TextMessage(content=continue_prompt, source=\"user\"))\n", + " print(f\"User: {continue_prompt}\")\n", + "\n", + " else:\n", + " print(\"Assistant did not provide a response.\")\n", + " break" ] }, { - "cell_type": "markdown", - "id": "7858f0f6-9aca-4cdb-a514-9fbf7e353d50", + "cell_type": "code", + "execution_count": null, + "id": "b3da08e8", "metadata": {}, + "outputs": [], "source": [ - "AutoGen will now start automatically tracking\n", - "\n", - "* LLM prompts and completions\n", - "* Token usage and costs\n", - "* Agent names and actions\n", - "* Correspondence between agents\n", - "* Tool usage\n", - "* Errors" + "nest_asyncio.apply()\n", + "asyncio.run(main())" ] }, { "cell_type": "markdown", - "id": "e875dd0c", - "metadata": 
{}, - "source": [ - "# Simple Chat Example" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2962d990-f7ef-43d8-ba09-d29bd8356d9f", + "id": "833abea8", "metadata": {}, - "outputs": [], "source": [ - "# Define model and API key\n", - "model_name = \"gpt-4-turbo\" # Or \"gpt-4o\" / \"gpt-4o-mini\" as per migration guide examples\n", - "api_key = os.getenv(\"OPENAI_API_KEY\")\n", - "\n", - "# Create the model client\n", - "model_client = OpenAIChatCompletionClient(model=model_name, api_key=api_key)\n", - "\n", - "# Create the agent that uses the LLM.\n", - "assistant = AssistantAgent(\n", - " name=\"assistant\",\n", - " system_message=\"You are a helpful assistant.\", # Added system message for clarity\n", - " model_client=model_client,\n", - ")\n", - "\n", - "user_proxy_initiator = UserProxyAgent(\"user_initiator\")\n", - "\n", - "\n", - "async def main():\n", - " termination = MaxMessageTermination(max_messages=2)\n", - "\n", - " group_chat = RoundRobinGroupChat(\n", - " [user_proxy_initiator, assistant], # Corrected: agents as positional argument\n", - " termination_condition=termination,\n", - " )\n", - "\n", - " chat_task = \"How can I help you today?\"\n", - " print(f\"User Initiator: {chat_task}\")\n", - "\n", - " try:\n", - " stream = group_chat.run_stream(task=chat_task)\n", - " await Console().run(stream)\n", - " agentops.end_trace(tracer, end_state=\"Success\")\n", - "\n", - " except StdinNotImplementedError:\n", - " print(\"StdinNotImplementedError: This typically happens in non-interactive environments.\")\n", - " print(\"Skipping interactive part of chat for automation.\")\n", - " agentops.end_trace(tracer, end_state=\"Indeterminate\")\n", - " except Exception as e:\n", - " print(f\"An error occurred: {e}\")\n", - " agentops.end_trace(tracer, end_state=\"Error\")\n", - " finally:\n", - " await model_client.close()\n", - "\n", - "\n", - "if __name__ == \"__main__\":\n", - " try:\n", - " loop = asyncio.get_running_loop()\n", - " except RuntimeError:\n", - " loop = None\n", - "\n", - " if loop and loop.is_running():\n", - " import nest_asyncio\n", - "\n", - " nest_asyncio.apply()\n", - " asyncio.run(main())\n", - " else:\n", - " asyncio.run(main())" + "You can view data on this run at [app.agentops.ai](app.agentops.ai)." ] }, { "cell_type": "markdown", - "id": "7b422137-903a-41ef-a4ca-95b50aea4138", + "id": "9502c9e8", "metadata": {}, "source": [ - "You can view data on this run at [app.agentops.ai](https://app.agentops.ai).\n", - "\n", "The dashboard will display LLM events for each message sent by each agent, including those made by the human user." 
] } ], "metadata": { - "kernelspec": { - "display_name": "agentops (3.11.11)", - "language": "python", - "name": "python3" + "jupytext": { + "cell_metadata_filter": "-all", + "main_language": "python", + "notebook_metadata_filter": "-all" }, "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" + "name": "python" } }, "nbformat": 4, diff --git a/examples/autogen/AgentChat.py b/examples/autogen/AgentChat.py index 347d9586f..d0de2c12c 100644 --- a/examples/autogen/AgentChat.py +++ b/examples/autogen/AgentChat.py @@ -1,25 +1,24 @@ -# Microsoft Autogen Multi-Agent Collaboration Example +# Microsoft Autogen Assistant Chat Example +# +# Overview +# This script demonstrates how to build an interactive AI assistant chat using Microsoft Autogen and AgentOps. +# You will see how to: +# - Initialize an assistant agent with OpenAI's GPT-4 model +# - Equip the agent with a calculator tool for step-by-step math problem solving +# - Track and log all agent interactions automatically with AgentOps +# - Simulate a conversation loop where the assistant and user exchange messages until the task is complete +# This approach is useful for building conversational AI agents that can reason through complex tasks, show their work, and provide transparent, auditable results. +# + +# AgentOps ensures all actions are tracked, making it easy to monitor, debug, and analyze your agent's performance in real time. # -# This example demonstrates AI-to-AI collaboration using multiple specialized agents working together without human interaction. -# AgentOps automatically configures itself when it's initialized meaning your agent run data will be tracked and logged to your AgentOps dashboard right away. # First let's install the required packages -# %pip install -U autogen-agentchat +# %pip install -U "ag2[autogen-agentchat]" # %pip install -U "autogen-ext[openai]" # %pip install -U agentops # %pip install -U python-dotenv -# Then import them -import os -from dotenv import load_dotenv -import asyncio +# %pip install -U nest_asyncio -import agentops - -from autogen_agentchat.agents import AssistantAgent -from autogen_ext.models.openai import OpenAIChatCompletionClient - -from autogen_agentchat.teams import RoundRobinGroupChat -from autogen_agentchat.conditions import MaxMessageTermination -from autogen_agentchat.ui import Console # Next, we'll set our API keys. There are several ways to do this, the code below is just the most foolproof way for the purposes of this notebook. It accounts for both users who use environment variables and those who just want to set the API Key here in this notebook. # @@ -28,105 +27,115 @@ # 1. Create an environment variable in a .env file or other method. By default, the AgentOps `init()` function will look for an environment variable named `AGENTOPS_API_KEY`. Or... # # 2. Replace `` below and pass in the optional `api_key` parameter to the AgentOps `init(api_key=...)` function. Remember not to commit your API key to a public repo! 
+ + +import os +import asyncio +from dotenv import load_dotenv +from typing import Annotated, Literal +import agentops +import nest_asyncio +from autogen_agentchat.agents import AssistantAgent, UserProxyAgent +from autogen_ext.models.openai import OpenAIChatCompletionClient +from autogen_agentchat.messages import TextMessage +from autogen_core import CancellationToken + load_dotenv() os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY", "your_api_key_here") os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "your_openai_api_key_here") - +# Ensure API key is available +openai_api_key = os.getenv("OPENAI_API_KEY") +if not openai_api_key: + raise ValueError("OPENAI_API_KEY environment variable is required") +# Create the model client +model_client = OpenAIChatCompletionClient(model="gpt-4-turbo", api_key=openai_api_key, seed=42, temperature=0) # When initializing AgentOps, you can pass in optional tags to help filter sessions -agentops.init(auto_start_session=False, trace_name="Autogen Multi-Agent Collaboration Example") -tracer = agentops.start_trace( - trace_name="Microsoft Multi-Agent Collaboration Example", - tags=["autogen-collaboration", "microsoft-autogen", "agentops-example"], -) +agentops.init(trace_name="autogen-agent-chat", tags=["autogen-agent-chat", "agentops-example"]) -# AutoGen will now start automatically tracking -# -# * LLM prompts and completions -# * Token usage and costs -# * Agent names and actions -# * Correspondence between agents -# * Tool usage -# * Errors -# # Multi-Agent Collaboration Example -# Define model and API key -model_name = "gpt-4o-mini" # Or "gpt-4o" / "gpt-4o-mini" as per migration guide examples -api_key = os.getenv("OPENAI_API_KEY") +# Define a simple calculator tool the assistant can use +Operator = Literal["+", "-", "*", "/"] -# Create the model client -model_client = OpenAIChatCompletionClient(model=model_name, api_key=api_key) -# Create multiple AI agents with different roles -research_agent = AssistantAgent( - name="research_agent", - system_message="You are a research specialist. Your role is to gather information, analyze data, and provide insights on topics. You ask thoughtful questions and provide well-researched responses.", +def calculator(a: int, b: int, operator: Annotated[Operator, "operator"]) -> int: + if operator == "+": + return a + b + elif operator == "-": + return a - b + elif operator == "*": + return a * b + elif operator == "/": + return int(a / b) + else: + raise ValueError("Invalid operator") + + +# Create the assistant agent with access to the calculator tool +assistant = AssistantAgent( + name="Assistant", + system_message="You are a helpful AI assistant with access to a calculator tool. " + "When given a mathematical expression, you must break it down and use the calculator tool " + "for ALL arithmetic operations until you reach the final answer. " + "Do not stop until you have calculated the complete final result. " + "Show your work step by step and provide the final numerical answer. " + "Only say 'TERMINATE' after you have completed the entire calculation.", model_client=model_client, + tools=[calculator], + reflect_on_tool_use=True, ) - -creative_agent = AssistantAgent( - name="creative_agent", - system_message="You are a creative strategist. Your role is to brainstorm innovative solutions, think outside the box, and propose creative approaches to problems. 
You build on others' ideas and suggest novel perspectives.", - model_client=model_client, -) - -analyst_agent = AssistantAgent( - name="analyst_agent", - system_message="You are a critical analyst. Your role is to evaluate ideas, identify strengths and weaknesses, and provide constructive feedback. You help refine concepts and ensure practical feasibility.", - model_client=model_client, +# Create a user proxy agent to represent the human user +user_proxy = UserProxyAgent( + name="User", + description="A user proxy agent that represents the human user.", ) +# Main async function to run the chat loop async def main(): - # Set up a longer conversation to allow for meaningful AI-to-AI interaction - termination = MaxMessageTermination(max_messages=8) - - group_chat = RoundRobinGroupChat( - [research_agent, creative_agent, analyst_agent], # AI agents working together - termination_condition=termination, - ) - - # A task that will engage all three agents in meaningful collaboration - chat_task = "Let's develop a comprehensive strategy for reducing plastic waste in urban environments. I need research on current methods, creative solutions, and analysis of feasibility." - print(f"šŸŽÆ Task: {chat_task}") - print("\n" + "=" * 80) - print("šŸ¤– AI Agents Collaboration Starting...") - print("=" * 80) - - try: - stream = group_chat.run_stream(task=chat_task) - await Console(stream=stream) - agentops.end_trace(tracer, end_state="Success") - - except Exception as e: - print(f"An error occurred: {e}") - agentops.end_trace(tracer, end_state="Error") - finally: - await model_client.close() - - # Let's check programmatically that spans were recorded in AgentOps - print("\n" + "=" * 50) - print("Now let's verify that our LLM calls were tracked properly...") - try: - agentops.validate_trace_spans(trace_context=tracer) - print("\nāœ… Success! All LLM spans were properly recorded in AgentOps.") - except agentops.ValidationError as e: - print(f"\nāŒ Error validating spans: {e}") - raise - - -if __name__ == "__main__": - try: - loop = asyncio.get_running_loop() - except RuntimeError: - loop = None - - if loop and loop.is_running(): - import nest_asyncio - - nest_asyncio.apply() - asyncio.run(main()) - else: - asyncio.run(main()) + # The initial question for the assistant to solve + initial_message = "What is (1423 - 123) / 3 + (32 + 23) * 5? Please complete the entire calculation step by step and show the final answer." 
+ print(f"User: {initial_message}") + + # Start the conversation history with the user's question + conversation_history = [TextMessage(content=initial_message, source="user")] + + max_turns = 10 # Prevent infinite loops + turn = 0 + + # Conversation loop: user and assistant take turns + while turn < max_turns: + turn += 1 + print(f"\n--- Turn {turn} ---") + + # Send the conversation so far to the assistant and get a response + response = await assistant.on_messages(conversation_history, CancellationToken()) + + if response.chat_message: + assistant_message = response.chat_message + print(f"Assistant: {assistant_message.to_text()}") + + # Check if the assistant has finished the calculation + message_text = assistant_message.to_text() + if "TERMINATE" in message_text or "final answer" in message_text.lower(): + print("Calculation completed!") + break + + # Add assistant's message to the conversation for the next turn + conversation_history.append(TextMessage(content=message_text, source="assistant")) + + # If not done, prompt the assistant to continue + if turn < max_turns - 1: + continue_prompt = "Please continue with the remaining calculations to get the final answer." + conversation_history.append(TextMessage(content=continue_prompt, source="user")) + print(f"User: {continue_prompt}") + + else: + print("Assistant did not provide a response.") + break + + +nest_asyncio.apply() +asyncio.run(main()) # You can view data on this run at [app.agentops.ai](app.agentops.ai). -# -# The dashboard will display LLM events for each message sent by each agent, showing the full AI-to-AI collaboration process with research, creative, and analytical perspectives. + +# The dashboard will display LLM events for each message sent by each agent, including those made by the human user. 
diff --git a/examples/autogen/GroupChatTeam.ipynb b/examples/autogen/GroupChatTeam.ipynb
new file mode 100644
index 000000000..1ac032d30
--- /dev/null
+++ b/examples/autogen/GroupChatTeam.ipynb
@@ -0,0 +1,253 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9c917c74",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Microsoft Autogen Group Chat Example\n",
+    "#\n",
+    "# AgentOps automatically configures itself when it's initialized, meaning your agent run data will be tracked and logged to your AgentOps dashboard right away.\n",
+    "# First let's install the required packages\n",
+    "%pip install -U \"autogen-agentchat\"\n",
+    "%pip install -U \"autogen-ext[openai]\"\n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv\n",
+    "%pip install -U nest_asyncio\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "622246d5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import asyncio\n",
+    "from typing import List, Sequence\n",
+    "from autogen_agentchat.agents import AssistantAgent, UserProxyAgent\n",
+    "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n",
+    "from autogen_agentchat.messages import BaseAgentEvent, BaseChatMessage\n",
+    "from autogen_agentchat.teams import SelectorGroupChat\n",
+    "from autogen_agentchat.ui import Console\n",
+    "from autogen_ext.models.openai import OpenAIChatCompletionClient\n",
+    "import os\n",
+    "import agentops\n",
+    "from dotenv import load_dotenv\n",
+    "import nest_asyncio"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c7308bed",
+   "metadata": {
+    "lines_to_next_cell": 1
+   },
+   "outputs": [],
+   "source": [
+    "load_dotenv()\n",
+    "agentops.init(auto_start_session=False, tags=[\"autogen-group-chat\", \"agentops-example\"])\n",
+    "tracer = agentops.start_trace(trace_name=\"autogen-group-chat\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7a45f514",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Define mock tools for the agents:\n",
+    "# - search_web_tool: Simulates web search results for specific basketball queries (used by the WebSearchAgent).\n",
+    "# - percentage_change_tool: Calculates the percentage change between two numbers (used by the DataAnalystAgent).\n",
+    "def search_web_tool(query: str) -> str:\n",
+    "    if \"2006-2007\" in query:\n",
+    "        return \"\"\"Here are the total points scored by Miami Heat players in the 2006-2007 season:\n",
+    "        Udonis Haslem: 844 points\n",
+    "        Dwayne Wade: 1397 points\n",
+    "        James Posey: 550 points\n",
+    "        ...\n",
+    "        \"\"\"\n",
+    "    elif \"2007-2008\" in query:\n",
+    "        return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\"\n",
+    "    elif \"2008-2009\" in query:\n",
+    "        return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\"\n",
+    "    return \"No data found.\"\n",
+    "\n",
+    "\n",
+    "def percentage_change_tool(start: float, end: float) -> float:\n",
+    "    return ((end - start) / start) * 100"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "98b6ebaf",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n",
+    "# Define the planning agent responsible for breaking down tasks and delegating them to other agents.\n",
+    "planning_agent = AssistantAgent(\n",
+    "    \"PlanningAgent\",\n",
+    "    description=\"An agent for planning tasks, this agent should be the first to engage when given a new task.\",\n",
+    "
model_client=model_client,\n", + " system_message=\"\"\"\n", + " You are a planning agent.\n", + " Your job is to break down complex tasks into smaller, manageable subtasks.\n", + " Your team members are:\n", + " WebSearchAgent: Searches for information\n", + " DataAnalystAgent: Performs calculations\n", + "\n", + " You only plan and delegate tasks - you do not execute them yourself.\n", + "\n", + " When assigning tasks, use this format:\n", + " 1. : \n", + "\n", + " After all tasks are complete, summarize the findings and end with \"TERMINATE\".\n", + " \"\"\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "49bb4708", + "metadata": {}, + "outputs": [], + "source": [ + "# The web search agent that is responsible for retrieving information using the search tool.\n", + "web_search_agent = AssistantAgent(\n", + " \"WebSearchAgent\",\n", + " description=\"An agent for searching information on the web.\",\n", + " tools=[search_web_tool],\n", + " model_client=model_client,\n", + " system_message=\"\"\"\n", + " You are a web search agent.\n", + " Your only tool is search_tool - use it to find information.\n", + " You make only one search call at a time.\n", + " Once you have the results, you never do calculations based on them.\n", + " \"\"\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "316cf588", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [], + "source": [ + "# The data analyst agent that is responsible for performing calculations using the provided tool.\n", + "data_analyst_agent = AssistantAgent(\n", + " \"DataAnalystAgent\",\n", + " description=\"An agent for performing calculations.\",\n", + " model_client=model_client,\n", + " tools=[percentage_change_tool],\n", + " system_message=\"\"\"\n", + " You are a data analyst.\n", + " Given the tasks you have been assigned, you should analyze the data and provide results using the tools provided.\n", + " If you have not seen the data, ask for it.\n", + " \"\"\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6ce48e2d", + "metadata": {}, + "outputs": [], + "source": [ + "# These rules decide when the group chat should stop:\n", + "# - If someone says 'TERMINATE' in the chat, or\n", + "# - If the chat goes on for too many messages (25 turns)\n", + "text_mention_termination = TextMentionTermination(\"TERMINATE\")\n", + "max_messages_termination = MaxMessageTermination(max_messages=25)\n", + "termination = text_mention_termination | max_messages_termination" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "79da2134", + "metadata": {}, + "outputs": [], + "source": [ + "# This is a message that helps the system pick which agent (helper) should talk next.\n", + "# It looks at what has happened so far and chooses the best agent for the next step.\n", + "selector_prompt = \"\"\"Select an agent to perform task.\n", + "{roles}\n", + "Current conversation context:\n", + "{history}\n", + "Read the above conversation, then select an agent from {participants} to perform the next task.\n", + "Make sure the planner agent has assigned tasks before other agents start working.\n", + "Only select one agent.\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4b908087", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [], + "source": [ + "# Here we put all our agents (helpers) together into a team.\n", + "# The team will work together to solve the problem, following the rules 
above.\n", + "team = SelectorGroupChat(\n", + " [planning_agent, web_search_agent, data_analyst_agent],\n", + " model_client=model_client,\n", + " termination_condition=termination,\n", + " selector_prompt=selector_prompt,\n", + " allow_repeated_speaker=True, # Allow an agent to speak multiple turns in a row.\n", + ")\n", + "task = \"Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "251c1e82", + "metadata": {}, + "outputs": [], + "source": [ + "nest_asyncio.apply()\n", + "asyncio.run(Console(team.run_stream(task=task)))" + ] + }, + { + "cell_type": "markdown", + "id": "9890cbec", + "metadata": {}, + "source": [ + "You can view data on this run at [app.agentops.ai](app.agentops.ai).\n", + "\n", + "The dashboard will display LLM events for each message sent by each agent, including those made by the human user." + ] + } + ], + "metadata": { + "jupytext": { + "cell_metadata_filter": "-all", + "main_language": "python", + "notebook_metadata_filter": "-all" + }, + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/autogen/GroupChatTeam.py b/examples/autogen/GroupChatTeam.py new file mode 100644 index 000000000..b4c533a21 --- /dev/null +++ b/examples/autogen/GroupChatTeam.py @@ -0,0 +1,132 @@ +# Microsoft Autogen Group Chat Example +# +# AgentOps automatically configures itself when it's initialized meaning your agent run data will be tracked and logged to your AgentOps dashboard right away. +# First let's install the required packages +# %pip install -U "ag2[autogen-agentchat]" +# %pip install -U "autogen-ext[openai]" +# %pip install -U agentops +# %pip install -U python-dotenv + + +import asyncio +from autogen_agentchat.agents import AssistantAgent +from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination +from autogen_agentchat.teams import SelectorGroupChat +from autogen_agentchat.ui import Console +from autogen_ext.models.openai import OpenAIChatCompletionClient +import agentops +from dotenv import load_dotenv +import nest_asyncio + +load_dotenv() +agentops.init(auto_start_session=False, tags=["autogen-group-chat", "agentops-example"]) +tracer = agentops.start_trace(trace_name="autogen-group-chat") + + +# Define mock tools for the agents: +# - search_web_tool: Simulates web search results for specific basketball queries (used by the WebSearchAgent). +# - percentage_change_tool: Calculates the percentage change between two numbers (used by the DataAnalystAgent). +def search_web_tool(query: str) -> str: + if "2006-2007" in query: + return """Here are the total points scored by Miami Heat players in the 2006-2007 season: + Udonis Haslem: 844 points + Dwayne Wade: 1397 points + James Posey: 550 points + ... + """ + elif "2007-2008" in query: + return "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214." + elif "2008-2009" in query: + return "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398." + return "No data found." 
+ + +def percentage_change_tool(start: float, end: float) -> float: + return ((end - start) / start) * 100 + + +model_client = OpenAIChatCompletionClient(model="gpt-4o") +# Define the planning agent responsible for breaking down tasks and delegating them to other agents. +planning_agent = AssistantAgent( + "PlanningAgent", + description="An agent for planning tasks, this agent should be the first to engage when given a new task.", + model_client=model_client, + system_message=""" + You are a planning agent. + Your job is to break down complex tasks into smaller, manageable subtasks. + Your team members are: + WebSearchAgent: Searches for information + DataAnalystAgent: Performs calculations + + You only plan and delegate tasks - you do not execute them yourself. + + When assigning tasks, use this format: + 1. : + + After all tasks are complete, summarize the findings and end with "TERMINATE". + """, +) + +# The web search agent that is responsible for retrieving information using the search tool. +web_search_agent = AssistantAgent( + "WebSearchAgent", + description="An agent for searching information on the web.", + tools=[search_web_tool], + model_client=model_client, + system_message=""" + You are a web search agent. + Your only tool is search_tool - use it to find information. + You make only one search call at a time. + Once you have the results, you never do calculations based on them. + """, +) + +# The data analyst agent that is responsible for performing calculations using the provided tool. +data_analyst_agent = AssistantAgent( + "DataAnalystAgent", + description="An agent for performing calculations.", + model_client=model_client, + tools=[percentage_change_tool], + system_message=""" + You are a data analyst. + Given the tasks you have been assigned, you should analyze the data and provide results using the tools provided. + If you have not seen the data, ask for it. + """, +) + + +# These rules decide when the group chat should stop: +# - If someone says 'TERMINATE' in the chat, or +# - If the chat goes on for too many messages (25 turns) +text_mention_termination = TextMentionTermination("TERMINATE") +max_messages_termination = MaxMessageTermination(max_messages=25) +termination = text_mention_termination | max_messages_termination + +# This is a message that helps the system pick which agent (helper) should talk next. +# It looks at what has happened so far and chooses the best agent for the next step. +selector_prompt = """Select an agent to perform task. +{roles} +Current conversation context: +{history} +Read the above conversation, then select an agent from {participants} to perform the next task. +Make sure the planner agent has assigned tasks before other agents start working. +Only select one agent. +""" + +# Here we put all our agents (helpers) together into a team. +# The team will work together to solve the problem, following the rules above. +team = SelectorGroupChat( + [planning_agent, web_search_agent, data_analyst_agent], + model_client=model_client, + termination_condition=termination, + selector_prompt=selector_prompt, + allow_repeated_speaker=True, # Allow an agent to speak multiple turns in a row. +) +task = "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?" + +nest_asyncio.apply() +asyncio.run(Console(team.run_stream(task=task))) + +# You can view data on this run at [app.agentops.ai](app.agentops.ai). 
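Given the mock data above (214 rebounds in 2007-2008, 398 in 2008-2009), the DataAnalystAgent's tool call should come out to roughly an 86% increase; a one-line check:

print(percentage_change_tool(214, 398))  # ((398 - 214) / 214) * 100 ≈ 85.98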
+ +# The dashboard will display LLM events for each message sent by each agent, including those made by the human user. diff --git a/examples/autogen/MathAgent.ipynb b/examples/autogen/MathAgent.ipynb index a79f88801..cb8fe5bed 100644 --- a/examples/autogen/MathAgent.ipynb +++ b/examples/autogen/MathAgent.ipynb @@ -1,88 +1,47 @@ { "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "bb6538d8-2a5d-4a99-b2c1-7130963e4f7b", - "metadata": {}, - "source": [ - "# Microsoft Autogen Tool Example\n", - "\n", - "AgentOps automatically configures itself when it's initialized meaning your agent run data will be tracked and logged to your AgentOps account right away." - ] - }, - { - "cell_type": "markdown", - "id": "083244fa", - "metadata": {}, - "source": [ - "First let's install the required packages" - ] - }, { "cell_type": "code", "execution_count": null, - "id": "9c8104ad", + "id": "def8a262", "metadata": {}, "outputs": [], "source": [ - "%pip install -U autogen-agentchat\n", + "# Microsoft Autogen Tool Example\n", + "#\n", + "# AgentOps automatically configures itself when it's initialized meaning your agent run data will be tracked and logged to your AgentOps account right away.\n", + "# First let's install the required packages\n", + "%pip install -U \"autogen-agentchat\"\n", "%pip install -U \"autogen-ext[openai]\"\n", "%pip install -U agentops\n", - "%pip install -U python-dotenv" - ] - }, - { - "cell_type": "markdown", - "id": "cc44e459", - "metadata": {}, - "source": [ - "Then import them" + "%pip install -U python-dotenv\n", + "%pip install -U nest_asyncio" ] }, { "cell_type": "code", "execution_count": null, - "id": "7672f591", + "id": "d0441ea7", "metadata": {}, "outputs": [], "source": [ "from typing import Annotated, Literal\n", "import asyncio\n", + "import nest_asyncio\n", "import os\n", "from dotenv import load_dotenv\n", - "from IPython.core.error import (\n", - " StdinNotImplementedError,\n", - ")\n", - "\n", "import agentops\n", - "\n", "from autogen_agentchat.agents import AssistantAgent\n", "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "from autogen_agentchat.messages import TextMessage" - ] - }, - { - "cell_type": "markdown", - "id": "24f8bd70", - "metadata": {}, - "source": [ - "Next, we'll set our API keys. There are several ways to do this, the code below is just the most foolproof way for the purposes of this notebook. It accounts for both users who use environment variables and those who just want to set the API Key here in this notebook.\n", - "\n", - "[Get an AgentOps API key](https://agentops.ai/settings/projects)\n", - "\n", - "1. Create an environment variable in a .env file or other method. By default, the AgentOps `init()` function will look for an environment variable named `AGENTOPS_API_KEY`. Or...\n", - "\n", - "2. Replace `` below and pass in the optional `api_key` parameter to the AgentOps `init(api_key=...)` function. Remember not to commit your API key to a public repo!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9eeaef34", - "metadata": {}, - "outputs": [], - "source": [ + "from autogen_agentchat.messages import TextMessage\n", + "from autogen_core import CancellationToken\n", + "# Next, we'll set our API keys. There are several ways to do this, the code below is just the most foolproof way for the purposes of this notebook. 
It accounts for both users who use environment variables and those who just want to set the API Key here in this notebook.\n", + "#\n", + "# [Get an AgentOps API key](https://agentops.ai/settings/projects)\n", + "#\n", + "# 1. Create an environment variable in a .env file or other method. By default, the AgentOps `init()` function will look for an environment variable named `AGENTOPS_API_KEY`. Or...\n", + "#\n", + "# 2. Replace `` below and pass in the optional `api_key` parameter to the AgentOps `init(api_key=...)` function. Remember not to commit your API key to a public repo!\n", "load_dotenv()\n", "os.environ[\"AGENTOPS_API_KEY\"] = os.getenv(\"AGENTOPS_API_KEY\", \"your_api_key_here\")\n", "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\", \"your_openai_api_key_here\")" @@ -91,57 +50,52 @@ { "cell_type": "code", "execution_count": null, - "id": "d93f2339-7b99-4cf1-9232-c24faba49c7b", + "id": "9516e204", "metadata": {}, "outputs": [], "source": [ - "agentops.init(auto_start_session=False)\n", + "agentops.init(auto_start_session=False, trace_name=\"Autogen Math Agent Example\")\n", "tracer = agentops.start_trace(\n", " trace_name=\"Microsoft Autogen Tool Example\", tags=[\"autogen-tool\", \"microsoft-autogen\", \"agentops-example\"]\n", ")" ] }, - { - "cell_type": "markdown", - "id": "7858f0f6-9aca-4cdb-a514-9fbf7e353d50", - "metadata": {}, - "source": [ - "AG2 will now start automatically tracking\n", - "\n", - "* LLM prompts and completions\n", - "* Token usage and costs\n", - "* Agent names and actions\n", - "* Correspondence between agents\n", - "* Tool usage\n", - "* Errors" - ] - }, - { - "cell_type": "markdown", - "id": "dc592637", - "metadata": {}, - "source": [ - "# Tool Example\n", - "AgentOps tracks when AG2 agents use tools. 
You can find more information on this example in [tool-use.ipynb](https://docs.ag2.ai/docs/tutorial/tool-use#tool-use)" - ] - }, { "cell_type": "code", "execution_count": null, - "id": "9e4dfe37-85e0-4035-a314-3459c6e378c4", + "id": "695eed00", "metadata": {}, "outputs": [], "source": [ - "# Define model and API key\n", + "# Autogen will now start automatically tracking\n", + "#\n", + "# * LLM prompts and completions\n", + "# * Token usage and costs\n", + "# * Agent names and actions\n", + "# * Correspondence between agents\n", + "# * Tool usage\n", + "# * Errors\n", + "# # Tool Example\n", + "# # Define model and API key\n", "model_name = \"gpt-4-turbo\"\n", "api_key = os.getenv(\"OPENAI_API_KEY\")\n", - "\n", + "# Ensure API key is available\n", + "if not api_key:\n", + " raise ValueError(\"OPENAI_API_KEY environment variable is required\")\n", "# Create the model client\n", "model_client = OpenAIChatCompletionClient(model=model_name, api_key=api_key, seed=42, temperature=0)\n", - "\n", - "Operator = Literal[\"+\", \"-\", \"*\", \"/\"]\n", - "\n", - "\n", + "Operator = Literal[\"+\", \"-\", \"*\", \"/\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "07656ee8", + "metadata": { + "lines_to_next_cell": 1 + }, + "outputs": [], + "source": [ "def calculator(a: int, b: int, operator: Annotated[Operator, \"operator\"]) -> int:\n", " if operator == \"+\":\n", " return a + b\n", @@ -152,64 +106,96 @@ " elif operator == \"/\":\n", " return int(a / b)\n", " else:\n", - " raise ValueError(\"Invalid operator\")\n", - "\n", - "\n", + " raise ValueError(\"Invalid operator\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dd2d0fd2", + "metadata": { + "lines_to_next_cell": 1 + }, + "outputs": [], + "source": [ "async def main():\n", + " # Create an assistant agent that can help with math problems and use the calculator tool.\n", " assistant = AssistantAgent(\n", " name=\"Assistant\",\n", " system_message=\"You are a helpful AI assistant. You can help with simple calculations. 
Return 'TERMINATE' when the task is done.\",\n",
     "        model_client=model_client,\n",
     "        tools=[calculator],\n",
     "        reflect_on_tool_use=True,\n",
+    "        max_tool_iterations=5,\n",
     "    )\n",
     "\n",
+    "    # This is the math question we want the assistant to solve.\n",
     "    initial_task_message = \"What is (1423 - 123) / 3 + (32 + 23) * 5?\"\n",
-    "    print(f\"User Task: {initial_task_message}\")\n",
     "\n",
     "    try:\n",
-    "        from autogen_core import CancellationToken\n",
-    "\n",
-    "        response = await assistant.on_messages(\n",
-    "            [TextMessage(content=initial_task_message, source=\"user\")], CancellationToken()\n",
+    "        # Start tracking the assistant's work for the first way of running the task (the on_messages method).\n",
+    "        tracer = agentops.start_trace(\n",
+    "            trace_name=\"autogen-math-agent-on-messages\", tags=[\"autogen-math\", \"agentops-example\"]\n",
+    "        )\n",
+    "        # Ask the assistant to solve the problem.\n",
+    "        await assistant.on_messages([TextMessage(content=initial_task_message, source=\"user\")], CancellationToken())\n",
+    "        agentops.end_trace(tracer, end_state=\"Success\")\n",
+    "\n",
+    "        # Start tracking for the second way of running the task (the run method).\n",
+    "        tracer = agentops.start_trace(\n",
+    "            trace_name=\"autogen-math-agent-run\", tags=[\"autogen-math\", \"agentops-example\"]\n",
+    "        )\n",
+    "        # Ask the assistant to solve the problem.\n",
+    "        await assistant.run(\n",
+    "            task=[TextMessage(content=initial_task_message, source=\"user\")],\n",
+    "            cancellation_token=CancellationToken()\n",
     "        )\n",
     "\n",
-    "        final_response_message = response.chat_message\n",
-    "        if final_response_message:\n",
-    "            print(f\"Assistant: {final_response_message.to_text()}\")\n",
-    "        else:\n",
-    "            print(\"Assistant did not provide a final message.\")\n",
-    "\n",
+    "        agentops.end_trace(tracer, end_state=\"Success\")\n",
+    "        # Start tracking for the third way: streaming the assistant's responses as they come in (the run_stream method).\n",
+    "        tracer = agentops.start_trace(\n",
+    "            trace_name=\"autogen-math-agent-run-stream\", tags=[\"autogen-math\", \"agentops-example\"]\n",
+    "        )\n",
+    "        async for message in assistant.run_stream(\n",
+    "            task=[TextMessage(content=initial_task_message, source=\"user\")],\n",
+    "            cancellation_token=CancellationToken()\n",
+    "        ):\n",
+    "            pass\n",
+    "        agentops.end_trace(tracer, end_state=\"Success\")\n",
+    "\n",
+    "        # Start tracking for the fourth way: streaming message events (the on_messages_stream method).\n",
+    "        tracer = agentops.start_trace(\n",
+    "            trace_name=\"autogen-math-agent-on-messages-stream\", tags=[\"autogen-math\", \"agentops-example\"]\n",
+    "        )\n",
+    "        async for message in assistant.on_messages_stream(\n",
+    "            messages=[TextMessage(content=initial_task_message, source=\"user\")],\n",
+    "            cancellation_token=CancellationToken()\n",
+    "        ):\n",
+    "            pass\n",
+    "        agentops.end_trace(tracer, end_state=\"Success\")\n",
     "\n",
-    "    except StdinNotImplementedError:\n",
-    "        print(\"StdinNotImplementedError: This typically happens in non-interactive environments.\")\n",
-    "        agentops.end_trace(tracer, end_state=\"Indeterminate\")\n",
     "    except Exception as e:\n",
     "        print(f\"An error occurred: {e}\")\n",
     "        agentops.end_trace(tracer, end_state=\"Error\")\n",
     "    finally:\n",
-    "        await model_client.close()\n",
-    "\n",
-    "\n",
-    "if __name__ == \"__main__\":\n",
-    "    try:\n",
-    "        loop = asyncio.get_running_loop()\n",
-    "    except RuntimeError:\n",
-    "        loop = None\n",
-    "\n",
-    "    if loop and loop.is_running():\n",
-    "        import nest_asyncio\n",
-    "\n",
-    "        nest_asyncio.apply()\n",
-    "
asyncio.run(main())\n", - " else:\n", - " asyncio.run(main())" + " # Always close the model client when done.\n", + " await model_client.close()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8d18460e", + "metadata": {}, + "outputs": [], + "source": [ + "nest_asyncio.apply()\n", + "asyncio.run(main())" ] }, { "cell_type": "markdown", - "id": "f67b0305-1247-489e-b1b0-829127af76d3", + "id": "192397db", "metadata": {}, "source": [ "You can see your run in action at [app.agentops.ai](app.agentops.ai). In this example, the AgentOps dashboard will show:\n", @@ -221,22 +207,13 @@ } ], "metadata": { - "kernelspec": { - "display_name": "agentops (3.11.11)", - "language": "python", - "name": "python3" + "jupytext": { + "cell_metadata_filter": "-all", + "main_language": "python", + "notebook_metadata_filter": "-all" }, "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" + "name": "python" } }, "nbformat": 4, diff --git a/examples/autogen/MathAgent.py b/examples/autogen/MathAgent.py index c594f05c1..f377016bd 100644 --- a/examples/autogen/MathAgent.py +++ b/examples/autogen/MathAgent.py @@ -2,21 +2,22 @@ # # AgentOps automatically configures itself when it's initialized meaning your agent run data will be tracked and logged to your AgentOps account right away. # First let's install the required packages -# %pip install -U autogen-agentchat +# %pip install -U "autogen-agentchat" # %pip install -U "autogen-ext[openai]" # %pip install -U agentops # %pip install -U python-dotenv -# Then import them +# %pip install -U nest_asyncio + from typing import Annotated, Literal import asyncio +import nest_asyncio import os from dotenv import load_dotenv - import agentops - from autogen_agentchat.agents import AssistantAgent from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_agentchat.messages import TextMessage +from autogen_core import CancellationToken # Next, we'll set our API keys. There are several ways to do this, the code below is just the most foolproof way for the purposes of this notebook. It accounts for both users who use environment variables and those who just want to set the API Key here in this notebook. # @@ -34,7 +35,7 @@ trace_name="Microsoft Autogen Tool Example", tags=["autogen-tool", "microsoft-autogen", "agentops-example"] ) -# AG2 will now start automatically tracking +# Autogen will now start automatically tracking # # * LLM prompts and completions # * Token usage and costs @@ -43,14 +44,14 @@ # * Tool usage # * Errors # # Tool Example -# AgentOps tracks when AG2 agents use tools. You can find more information on this example in [tool-use.ipynb](https://docs.ag2.ai/docs/tutorial/tool-use#tool-use) # # Define model and API key model_name = "gpt-4-turbo" api_key = os.getenv("OPENAI_API_KEY") - +# Ensure API key is available +if not api_key: + raise ValueError("OPENAI_API_KEY environment variable is required") # Create the model client model_client = OpenAIChatCompletionClient(model=model_name, api_key=api_key, seed=42, temperature=0) - Operator = Literal["+", "-", "*", "/"] @@ -68,62 +69,66 @@ def calculator(a: int, b: int, operator: Annotated[Operator, "operator"]) -> int async def main(): + # Create an assistant agent that can help with math problems and use the calculator tool. 
assistant = AssistantAgent(
         name="Assistant",
         system_message="You are a helpful AI assistant. You can help with simple calculations. Return 'TERMINATE' when the task is done.",
         model_client=model_client,
         tools=[calculator],
         reflect_on_tool_use=True,
+        max_tool_iterations=5,
     )
 
+    # This is the math question we want the assistant to solve.
     initial_task_message = "What is (1423 - 123) / 3 + (32 + 23) * 5?"
-    print(f"User Task: {initial_task_message}")
 
     try:
-        from autogen_core import CancellationToken
+        # Start tracking the assistant's work for the first way of running the task (the on_messages method).
+        tracer = agentops.start_trace(
+            trace_name="autogen-math-agent-on-messages", tags=["autogen-math", "agentops-example"]
+        )
+        # Ask the assistant to solve the problem.
+        await assistant.on_messages([TextMessage(content=initial_task_message, source="user")], CancellationToken())
+        agentops.end_trace(tracer, end_state="Success")
 
-        response = await assistant.on_messages(
-            [TextMessage(content=initial_task_message, source="user")], CancellationToken()
+        # Start tracking for the second way of running the task (the run method).
+        tracer = agentops.start_trace(trace_name="autogen-math-agent-run", tags=["autogen-math", "agentops-example"])
+        # Ask the assistant to solve the problem.
+        await assistant.run(
+            task=[TextMessage(content=initial_task_message, source="user")], cancellation_token=CancellationToken()
         )
 
-        final_response_message = response.chat_message
-        if final_response_message:
-            print(f"Assistant: {final_response_message.to_text()}")
-        else:
-            print("Assistant did not provide a final message.")
+        agentops.end_trace(tracer, end_state="Success")
+        # Start tracking for the third way: streaming the assistant's responses as they come in (the run_stream method).
+        tracer = agentops.start_trace(
+            trace_name="autogen-math-agent-run-stream", tags=["autogen-math", "agentops-example"]
+        )
+        async for message in assistant.run_stream(
+            task=[TextMessage(content=initial_task_message, source="user")], cancellation_token=CancellationToken()
+        ):
+            pass
+        agentops.end_trace(tracer, end_state="Success")
+        # Start tracking for the fourth way: streaming message events (the on_messages_stream method).
+        tracer = agentops.start_trace(
+            trace_name="autogen-math-agent-on-messages-stream", tags=["autogen-math", "agentops-example"]
+        )
+        async for message in assistant.on_messages_stream(
+            messages=[TextMessage(content=initial_task_message, source="user")], cancellation_token=CancellationToken()
+        ):
+            pass
         agentops.end_trace(tracer, end_state="Success")
 
     except Exception as e:
         print(f"An error occurred: {e}")
         agentops.end_trace(tracer, end_state="Error")
     finally:
+        # Always close the model client when done.
         await model_client.close()
 
-    # Let's check programmatically that spans were recorded in AgentOps
-    print("\n" + "=" * 50)
-    print("Now let's verify that our LLM calls were tracked properly...")
-    try:
-        agentops.validate_trace_spans(trace_context=tracer)
-        print("\nāœ… Success! All LLM spans were properly recorded in AgentOps.")
-    except agentops.ValidationError as e:
-        print(f"\nāŒ Error validating spans: {e}")
-        raise
-
 
-if __name__ == "__main__":
-    try:
-        loop = asyncio.get_running_loop()
-    except RuntimeError:
-        loop = None
-
-    if loop and loop.is_running():
-        import nest_asyncio
-
-        nest_asyncio.apply()
-        asyncio.run(main())
-    else:
-        asyncio.run(main())
+nest_asyncio.apply()
+asyncio.run(main())
 
 # You can see your run in action at [app.agentops.ai](app.agentops.ai).
In this example, the AgentOps dashboard will show:
 #
diff --git a/examples/autogen/SwarmTeam.ipynb b/examples/autogen/SwarmTeam.ipynb
new file mode 100644
index 000000000..f623a0280
--- /dev/null
+++ b/examples/autogen/SwarmTeam.ipynb
@@ -0,0 +1,245 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "1525bef7",
+   "metadata": {},
+   "source": [
+    "Microsoft Autogen Swarm Team Example\n",
+    "\n",
+    "This example shows how you can have two AI agents work together to help a user refund a flight.\n",
+    "Each agent has a special job, and they can \"handoff\" the conversation to each other or to the user as needed.\n",
+    "All actions are tracked by AgentOps so you can see what happened in your dashboard."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8414d1ae",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# First let's install the required packages\n",
+    "%pip install -U \"autogen-agentchat\"\n",
+    "%pip install -U \"autogen-ext[openai]\"\n",
+    "%pip install -U agentops\n",
+    "%pip install -U python-dotenv\n",
+    "%pip install -U nest_asyncio"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2439f81d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from typing import Any, Dict, List\n",
+    "import asyncio\n",
+    "from autogen_agentchat.agents import AssistantAgent\n",
+    "from autogen_agentchat.conditions import HandoffTermination, TextMentionTermination\n",
+    "from autogen_agentchat.messages import HandoffMessage\n",
+    "from autogen_agentchat.teams import Swarm\n",
+    "from autogen_agentchat.ui import Console\n",
+    "from autogen_ext.models.openai import OpenAIChatCompletionClient\n",
+    "import os\n",
+    "import agentops\n",
+    "from dotenv import load_dotenv\n",
+    "import nest_asyncio"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9aa32238",
+   "metadata": {
+    "lines_to_next_cell": 1
+   },
+   "outputs": [],
+   "source": [
+    "# Load environment variables (like API keys)\n",
+    "load_dotenv()\n",
+    "# Set up AgentOps to track everything that happens in this session\n",
+    "agentops.init(auto_start_session=False, tags=[\"autogen-swarm-team\", \"agentops-example\"])\n",
+    "tracer = agentops.start_trace(trace_name=\"autogen-swarm-team\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ccfa2986",
+   "metadata": {
+    "lines_to_next_cell": 1
+   },
+   "outputs": [],
+   "source": [
+    "# This is a pretend tool that \"refunds\" a flight when given a flight ID.\n",
+    "def refund_flight(flight_id: str) -> str:\n",
+    "    \"\"\"Refund a flight\"\"\"\n",
+    "    return f\"Flight {flight_id} refunded\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f393818b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Set up the AI model client (the brain for the agents)\n",
+    "model_client = OpenAIChatCompletionClient(\n",
+    "    model=\"gpt-4o\",\n",
+    "    api_key=os.getenv(\"OPENAI_API_KEY\", \"your_openai_api_key_here\"),\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e505972d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# The travel agent helps with travel tasks and can hand off to the flights_refunder agent or to the user.\n",
+    "travel_agent = AssistantAgent(\n",
+    "    \"travel_agent\",\n",
+    "    model_client=model_client,\n",
+    "    handoffs=[\"flights_refunder\", \"user\"],\n",
+    "    system_message=\"\"\"You are a travel agent.\n",
+    "    The flights_refunder is in charge of refunding flights.\n",
+    "    If you need information from the user, you must first send your message, then
you can handoff to the user.\n", + " Use TERMINATE when the travel planning is complete.\"\"\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e4114e30", + "metadata": {}, + "outputs": [], + "source": [ + "# The flights_refunder agent specializes in refunding flights and can use the refund_flight tool.\n", + "flights_refunder = AssistantAgent(\n", + " \"flights_refunder\",\n", + " model_client=model_client,\n", + " handoffs=[\"travel_agent\", \"user\"],\n", + " tools=[refund_flight],\n", + " system_message=\"\"\"You are an agent specialized in refunding flights.\n", + " You only need flight reference numbers to refund a flight.\n", + " You have the ability to refund a flight using the refund_flight tool.\n", + " If you need information from the user, you must first send your message, then you can handoff to the user.\n", + " When the transaction is complete, handoff to the travel agent to finalize.\"\"\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ae59c536", + "metadata": { + "lines_to_next_cell": 1 + }, + "outputs": [], + "source": [ + "# These rules decide when the conversation should stop:\n", + "# - If the user is handed the conversation (handoff to user), or\n", + "# - If someone says 'TERMINATE' in the chat\n", + "termination = HandoffTermination(target=\"user\") | TextMentionTermination(\"TERMINATE\")\n", + "# Put both agents together into a \"Swarm\" team so they can work together.\n", + "team = Swarm([travel_agent, flights_refunder], termination_condition=termination)\n", + "# This is the task the user wants help with.\n", + "task = \"I need to refund my flight.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7082d252", + "metadata": { + "lines_to_next_cell": 1 + }, + "outputs": [], + "source": [ + "# This function runs the team and handles the back-and-forth with the user.\n", + "async def run_team_stream() -> None:\n", + " task_result = await Console(team.run_stream(task=task))\n", + " last_message = task_result.messages[-1]\n", + "\n", + " # These are the user's replies, sent automatically to keep the example running.\n", + " scripted_responses = [\n", + " \"My flight reference is ABC123.\",\n", + " \"Yes, thank you. TERMINATE\",\n", + " ]\n", + " response_index = 0\n", + "\n", + " # Keep going as long as the agents hand the conversation to the user.\n", + " while isinstance(last_message, HandoffMessage) and last_message.target == \"user\":\n", + " if response_index >= len(scripted_responses):\n", + " break # Stop if we run out of replies\n", + "\n", + " user_message = scripted_responses[response_index]\n", + " response_index += 1\n", + "\n", + " task_result = await Console(\n", + " team.run_stream(\n", + " task=HandoffMessage(\n", + " source=\"user\", target=last_message.source, content=user_message\n", + " )\n", + " )\n", + " )\n", + " last_message = task_result.messages[-1]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "686dd65b", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [], + "source": [ + "# Start the team and let the agents and user work together to solve the problem.\n", + "nest_asyncio.apply()\n", + "asyncio.run(run_team_stream())" + ] + }, + { + "cell_type": "markdown", + "id": "f2625595", + "metadata": {}, + "source": [ + "You can view data on this run at [app.agentops.ai](app.agentops.ai).\n", + "\n", + "The dashboard will display LLM events for each message sent by each agent, including those made by the human user." 
+ ] + } + ], + "metadata": { + "jupytext": { + "cell_metadata_filter": "-all", + "main_language": "python", + "notebook_metadata_filter": "-all" + }, + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/autogen/SwarmTeam.py b/examples/autogen/SwarmTeam.py new file mode 100644 index 000000000..c0800f1ac --- /dev/null +++ b/examples/autogen/SwarmTeam.py @@ -0,0 +1,107 @@ +# Microsoft Autogen Swarm Team Example +# +# This example shows how you can have two AI agents work together to help a user refund a flight. +# Each agent has a special job, and they can "handoff" the conversation to each other or to the user as needed. +# All actions are tracked by AgentOps so you can see what happened in your dashboard. + +# First let's install the required packages +# %pip install -U "ag2[autogen-agentchat]" +# %pip install -U "autogen-ext[openai]" +# %pip install -U agentops +# %pip install -U python-dotenv +# %pip install -U nest_asyncio + +import asyncio +from autogen_agentchat.agents import AssistantAgent +from autogen_agentchat.conditions import HandoffTermination, TextMentionTermination +from autogen_agentchat.messages import HandoffMessage +from autogen_agentchat.teams import Swarm +from autogen_agentchat.ui import Console +from autogen_ext.models.openai import OpenAIChatCompletionClient +import os +import agentops +from dotenv import load_dotenv +import nest_asyncio + +# Load environment variables (like API keys) +load_dotenv() +# Set up AgentOps to track everything that happens in this session +agentops.init(auto_start_session=False, tags=["autogen-swarm-team", "agentops-example"]) +tracer = agentops.start_trace(trace_name="autogen-swarm-team") + + +# This is a pretend tool that "refunds" a flight when given a flight ID. +def refund_flight(flight_id: str) -> str: + """Refund a flight""" + return f"Flight {flight_id} refunded" + + +# Set up the AI model client (the brain for the agents) +model_client = OpenAIChatCompletionClient( + model="gpt-4o", + api_key=os.getenv("OPENAI_API_KEY", "your_openai_api_key_here"), +) + +# The travel agent helps with travel tasks and can hand off to the flights_refunder agent. +travel_agent = AssistantAgent( + "travel_agent", + model_client=model_client, + handoffs=["flights_refunder", ""], + system_message="""You are a travel agent. + The flights_refunder is in charge of refunding flights. + If you need information from the user, you must first send your message, then you can handoff to the user. + Use TERMINATE when the travel planning is complete.""", +) + +# The flights_refunder agent specializes in refunding flights and can use the refund_flight tool. +flights_refunder = AssistantAgent( + "flights_refunder", + model_client=model_client, + handoffs=["travel_agent", "user"], + tools=[refund_flight], + system_message="""You are an agent specialized in refunding flights. + You only need flight reference numbers to refund a flight. + You have the ability to refund a flight using the refund_flight tool. + If you need information from the user, you must first send your message, then you can handoff to the user. 
+ When the transaction is complete, handoff to the travel agent to finalize.""", +) + +# These rules decide when the conversation should stop: +# - If the user is handed the conversation (handoff to user), or +# - If someone says 'TERMINATE' in the chat +termination = HandoffTermination(target="user") | TextMentionTermination("TERMINATE") +# Put both agents together into a "Swarm" team so they can work together. +team = Swarm([travel_agent, flights_refunder], termination_condition=termination) +# This is the task the user wants help with. +task = "I need to refund my flight." + + +# This function runs the team and handles the back-and-forth with the user. +async def run_team_stream() -> None: + task_result = await Console(team.run_stream(task=task)) + last_message = task_result.messages[-1] + + # These are the user's replies, sent automatically to keep the example running. + scripted_responses = [ + "My flight reference is ABC123.", + "Yes, thank you. TERMINATE", + ] + response_index = 0 + + # Keep going as long as the agents hand the conversation to the user. + while isinstance(last_message, HandoffMessage) and last_message.target == "user": + if response_index >= len(scripted_responses): + break # Stop if we run out of replies + + user_message = scripted_responses[response_index] + response_index += 1 + + task_result = await Console( + team.run_stream(task=HandoffMessage(source="user", target=last_message.source, content=user_message)) + ) + last_message = task_result.messages[-1] + + +# Start the team and let the agents and user work together to solve the problem. +nest_asyncio.apply() +asyncio.run(run_team_stream()) diff --git a/examples/autogen/requirements.txt b/examples/autogen/requirements.txt index c0565f058..1b351780c 100644 --- a/examples/autogen/requirements.txt +++ b/examples/autogen/requirements.txt @@ -1,2 +1,3 @@ -autogen-agentchat==0.6.1 -autogen-ext[openai] \ No newline at end of file +autogen-agentchat==0.6.4 +autogen-ext[openai] +nest_asyncio \ No newline at end of file
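Since the examples are now pinned to autogen-agentchat==0.6.4, a small runtime check along these lines can catch a stale environment; this is a sketch for readers, not part of the patch:

from importlib.metadata import version

installed = version("autogen-agentchat")
print(f"autogen-agentchat {installed}")  # the examples above were written against 0.6.4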