diff --git a/README.md b/README.md index bfaacc2ccc6f..a4b3a3c37c19 100644 --- a/README.md +++ b/README.md @@ -144,6 +144,27 @@ response = autogen.Completion.create(context=test_instance, **config) Please find more [code examples](https://microsoft.github.io/autogen/docs/Examples#tune-gpt-models) for this feature. --> +## Monitoring +Basic monitoring is demonstrated in `test/twoagent-monitored.py` + +Monitoring can be implemented using the AgentOps library. + +1. `pip install agentops` +2. Create an account at https://agentops.ai and generate an API key +3. `export AGENTOPS_API_KEY=<your API key>` +4. When creating a `ConversableAgent` or `AssistantAgent`, first create an AgentOps client instance + ```python + from agentops import Client + ao_client = Client(api_key=os.environ.get('AGENTOPS_API_KEY'), + tags=['describe your session here']) + ``` +5. Pass this client instance into the constructor arguments of your Agent + ```python + assistant = AssistantAgent("assistant", llm_config={"config_list": config_list}, ao_client=ao_client) + ``` +6. Run your agent(s) and view the results of your session on the [AgentOps Dashboard](https://app.agentops.ai/dashboard) + + ## Documentation You can find detailed documentation about AutoGen [here](https://microsoft.github.io/autogen/). 
diff --git a/autogen/agentchat/assistant_agent.py b/autogen/agentchat/assistant_agent.py index ce7836da1666..2a1a8177172f 100644 --- a/autogen/agentchat/assistant_agent.py +++ b/autogen/agentchat/assistant_agent.py @@ -1,4 +1,5 @@ from typing import Callable, Dict, Literal, Optional, Union +from agentops import Client as AOClient from .conversable_agent import ConversableAgent @@ -39,6 +40,7 @@ def __init__( human_input_mode: Optional[str] = "NEVER", code_execution_config: Optional[Union[Dict, Literal[False]]] = False, description: Optional[str] = None, + ao_client: AOClient = None, **kwargs, ): """ @@ -67,6 +69,7 @@ code_execution_config=code_execution_config, llm_config=llm_config, description=description, + ao_client=ao_client, **kwargs, ) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 493a83da8a56..1e3b96f2eed8 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -10,6 +10,7 @@ from autogen.code_utils import DEFAULT_MODEL, UNKNOWN, content_str, execute_code, extract_code, infer_lang from .agent import Agent +from agentops import Client as AOClient try: from termcolor import colored @@ -56,6 +57,7 @@ def __init__( llm_config: Optional[Union[Dict, Literal[False]]] = None, default_auto_reply: Optional[Union[str, Dict, None]] = "", description: Optional[str] = None, + ao_client: AOClient = None ): """ Args: @@ -99,6 +101,7 @@ default_auto_reply (str or dict or None): default auto reply when no code execution or llm-based reply is generated. description (str): a short description of the agent. This description is used by other agents (e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message) + ao_client (AgentOps Client Instance): an instance of an AgentOps client. All agents should share one instance. 
(Default: None) """ super().__init__(name) # a dictionary of conversations, default value is list @@ -140,6 +143,7 @@ def __init__( self.register_reply([Agent, None], ConversableAgent.a_generate_function_call_reply) self.register_reply([Agent, None], ConversableAgent.check_termination_and_human_reply) self.register_reply([Agent, None], ConversableAgent.a_check_termination_and_human_reply) + self.ao_client = ao_client def register_reply( self, @@ -349,6 +353,8 @@ def send( """ # When the agent composes and sends the message, the role of the message is "assistant" # unless it's "function". + if self.ao_client: + self.ao_client.record_action("send_message_to_another_agent", tags=['conversable_agent', str(self.name())]) valid = self._append_oai_message(message, "assistant", recipient) if valid: recipient.receive(message, self, request_reply, silent) @@ -481,6 +487,8 @@ def receive( Raises: ValueError: if the message can't be converted into a valid ChatCompletion message. """ + if self.ao_client: + self.ao_client.record_action("received_message_from_another_agent", tags=['conversable_agent', str(self.name())]) self._process_received_message(message, sender, silent) if request_reply is False or request_reply is None and self.reply_at_receive[sender] is False: return diff --git a/setup.py b/setup.py index 21de92527a35..74c813292bd8 100644 --- a/setup.py +++ b/setup.py @@ -20,6 +20,7 @@ "flaml", "python-dotenv", "tiktoken", + "agentops" ] setuptools.setup( diff --git a/test/oai/test_utils.py b/test/oai/test_utils.py index 579fc6f9d8a2..ebd113cb6257 100644 --- a/test/oai/test_utils.py +++ b/test/oai/test_utils.py @@ -17,6 +17,7 @@ "OPENAI_API_KEY": "sk-********************", "HUGGING_FACE_API_KEY": "**************************", "ANOTHER_API_KEY": "1234567890234567890", + "AGENTOPS_API_KEY": "********-****-****-****-************" } # Example model to API key mappings diff --git a/test/twoagent-monitored.py b/test/twoagent-monitored.py new file mode 100644 index 
000000000000..a31fed523f10 --- /dev/null +++ b/test/twoagent-monitored.py @@ -0,0 +1,16 @@ +import os +from autogen import AssistantAgent, UserProxyAgent, config_list_from_json +from agentops import Client + +ao_client = Client(api_key=os.environ.get('AGENTOPS_API_KEY'), + tags=['autogen', 'Autogen Example']) + +# Load LLM inference endpoints from an env variable or a file +# See https://microsoft.github.io/autogen/docs/FAQ#set-your-api-endpoints +# and OAI_CONFIG_LIST_sample +config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST") + +# pass in the AgentOps client instance for agent monitoring and visibility +assistant = AssistantAgent("assistant", llm_config={"config_list": config_list}, ao_client=ao_client) +user_proxy = UserProxyAgent("user_proxy", code_execution_config={"work_dir": "coding"}) +user_proxy.initiate_chat(assistant, message="Plot a chart of NVDA and TESLA stock price change YTD.")