Commit 5b272ab

Revert "Add autogen agent instrumentation."
This reverts commit a26766a.
1 parent 5529af3 commit 5b272ab

8 files changed: +14 -990 lines changed

newrelic/config.py

Lines changed: 0 additions & 5 deletions
@@ -2866,11 +2866,6 @@ def _process_module_builtin_defaults():
     _process_module_definition(
         "autogen_ext.tools.mcp._base", "newrelic.hooks.mlmodel_autogen", "instrument_autogen_ext_tools_mcp__base"
     )
-    _process_module_definition(
-        "autogen_agentchat.agents._assistant_agent",
-        "newrelic.hooks.mlmodel_autogen",
-        "instrument_autogen_agentchat_agents__assistant_agent"
-    )
 
     _process_module_definition("mcp.client.session", "newrelic.hooks.adapter_mcp", "instrument_mcp_client_session")

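Note: each _process_module_definition(module_path, hook_module, hook_function) call registers a lazy import hook, so the instrumentation function runs only if and when the target module is first imported, and uninstalled frameworks cost nothing at startup. A minimal sketch of that post-import-hook pattern, using the public wrapt API rather than the agent's internal registry (the _lazy_instrument helper is illustrative, not part of the agent):

from importlib import import_module

from wrapt import register_post_import_hook


def _lazy_instrument(hook_module_name, hook_function_name):
    # Import the hook module and run its instrument_* function only once the
    # instrumented package is actually imported by the application.
    def _hook(module):
        getattr(import_module(hook_module_name), hook_function_name)(module)

    return _hook


# Hypothetical registration mirroring the line this revert removes:
register_post_import_hook(
    _lazy_instrument("newrelic.hooks.mlmodel_autogen", "instrument_autogen_agentchat_agents__assistant_agent"),
    "autogen_agentchat.agents._assistant_agent",
)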
newrelic/hooks/mlmodel_autogen.py

Lines changed: 0 additions & 141 deletions
@@ -12,32 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
-import json
-import sys
-import traceback
-import uuid
-
 from newrelic.api.function_trace import FunctionTrace
-from newrelic.api.time_trace import get_trace_linking_metadata
-from newrelic.common.package_version_utils import get_package_version
 from newrelic.api.transaction import current_transaction
 from newrelic.common.object_names import callable_name
 from newrelic.common.object_wrapper import wrap_function_wrapper
 from newrelic.common.signature import bind_args
-from newrelic.core.config import global_settings
-
-# Check for the presence of the autogen-core, autogen-agentchat, or autogen-ext package as they should all have the
-# same version and one or multiple could be installed
-AUTOGEN_VERSION = (
-    get_package_version("autogen-core")
-    or get_package_version("autogen-agentchat")
-    or get_package_version("autogen-ext")
-)
-
-RECORD_EVENTS_FAILURE_LOG_MESSAGE = "Exception occurred in Autogen instrumentation: Failed to record LLM events. Please report this issue to New Relic Support.\n%s"
-
-_logger = logging.getLogger(__name__)
 
 
 async def wrap_from_server_params(wrapped, instance, args, kwargs):
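
The deleted AUTOGEN_VERSION block relies on the three autogen distributions being versioned in lockstep, so the first one found supplies the framework version. Restated as a standalone snippet (the print is only for illustration):

from newrelic.common.package_version_utils import get_package_version

# autogen-core, autogen-agentchat, and autogen-ext are released together, so
# whichever distribution is installed reports the framework version.
autogen_version = (
    get_package_version("autogen-core")
    or get_package_version("autogen-agentchat")
    or get_package_version("autogen-ext")
)
print(autogen_version)  # e.g. "0.4.7", or None if no autogen package is installed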
@@ -53,126 +32,6 @@ async def wrap_from_server_params(wrapped, instance, args, kwargs):
     return await wrapped(*args, **kwargs)
 
 
-def wrap_on_messages_stream(wrapped, instance, args, kwargs):
-    transaction = current_transaction()
-    if not transaction:
-        return wrapped(*args, **kwargs)
-
-    agent_name = getattr(instance, "name", "agent")
-    func_name = callable_name(wrapped)
-    function_trace_name = f"{func_name}/{agent_name}"
-    with FunctionTrace(name=function_trace_name, group="Llm", source=wrapped):
-        return wrapped(*args, **kwargs)
-
-
-def _get_llm_metadata(transaction):
-    # Grab LLM-related custom attributes off of the transaction to store as metadata on LLM events
-    custom_attrs_dict = transaction._custom_params
-    llm_metadata_dict = {key: value for key, value in custom_attrs_dict.items() if key.startswith("llm.")}
-    llm_context_attrs = getattr(transaction, "_llm_context_attrs", None)
-    if llm_context_attrs:
-        llm_metadata_dict.update(llm_context_attrs)
-
-    return llm_metadata_dict
-
-
-def _extract_tool_output(return_val, tool_name):
-    try:
-        output = getattr(return_val[1], "content", None)
-        return output
-    except Exception:
-        _logger.warning(f"Unable to parse tool output value from {tool_name}. Omitting output from LlmTool event.")
-        return None
-
-
-def _construct_base_tool_event_dict(bound_args, tool_call_data, tool_id, transaction, settings):
-    try:
-        input = getattr(tool_call_data, "arguments", None)
-        tool_input = str(input) if input else None
-        run_id = getattr(tool_call_data, "id", None)
-        tool_name = getattr(tool_call_data, "name", "tool")
-        agent_name = bound_args.get("agent_name")
-        linking_metadata = get_trace_linking_metadata()
-
-        tool_event_dict = {
-            "id": tool_id,
-            "run_id": run_id,
-            "name": tool_name,
-            "span_id": linking_metadata.get("span.id"),
-            "trace_id": linking_metadata.get("trace.id"),
-            "agent_name": agent_name,
-            "vendor": "autogen",
-            "ingest_source": "Python",
-        }
-        if settings.ai_monitoring.record_content.enabled:
-            tool_event_dict.update({"input": tool_input})
-        tool_event_dict.update(_get_llm_metadata(transaction))
-    except Exception:
-        tool_event_dict = {}
-        _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE, exc_info=True)
-
-    return tool_event_dict
-
-
-async def wrap__execute_tool_call(wrapped, instance, args, kwargs):
-    transaction = current_transaction()
-    if not transaction:
-        return await wrapped(*args, **kwargs)
-
-    settings = transaction.settings if transaction.settings is not None else global_settings()
-    if not settings.ai_monitoring.enabled:
-        return await wrapped(*args, **kwargs)
-
-    # Framework metric also used for entity tagging in the UI
-    transaction.add_ml_model_info("Autogen", AUTOGEN_VERSION)
-    transaction._add_agent_attribute("llm", True)
-
-    tool_id = str(uuid.uuid4())
-    bound_args = bind_args(wrapped, args, kwargs)
-    tool_call_data = bound_args.get("tool_call")
-    tool_event_dict = _construct_base_tool_event_dict(bound_args, tool_call_data, tool_id, transaction, settings)
-
-    tool_name = getattr(tool_call_data, "name", "tool")
-
-    func_name = callable_name(wrapped)
-    ft = FunctionTrace(name=f"{func_name}/{tool_name}", group="Llm/tool/Autogen")
-    ft.__enter__()
-
-    try:
-        return_val = await wrapped(*args, **kwargs)
-    except Exception:
-        ft.notice_error(attributes={"tool_id": tool_id})
-        ft.__exit__(*sys.exc_info())
-        # If we hit an exception, append the error attribute and duration from the exited function trace
-        tool_event_dict.update({"duration": ft.duration * 1000, "error": True})
-        transaction.record_custom_event("LlmTool", tool_event_dict)
-        raise
-
-    ft.__exit__(None, None, None)
-
-    if not return_val:
-        return return_val
-
-    tool_event_dict.update({"duration": ft.duration * 1000})
-
-    # If the tool was executed successfully, we can grab the tool output from the result
-    tool_output = _extract_tool_output(return_val, tool_name)
-    if settings.ai_monitoring.record_content.enabled:
-        tool_event_dict.update({"output": tool_output})
-
-    transaction.record_custom_event("LlmTool", tool_event_dict)
-
-    return return_val
-
-
-def instrument_autogen_agentchat_agents__assistant_agent(module):
-    if hasattr(module, "AssistantAgent"):
-        if hasattr(module.AssistantAgent, "on_messages_stream"):
-            wrap_function_wrapper(module, "AssistantAgent.on_messages_stream", wrap_on_messages_stream)
-        if hasattr(module.AssistantAgent, "_execute_tool_call"):
-            wrap_function_wrapper(module, "AssistantAgent._execute_tool_call", wrap__execute_tool_call)
-
-
 def instrument_autogen_ext_tools_mcp__base(module):
     if hasattr(module, "McpToolAdapter"):
         if hasattr(module.McpToolAdapter, "from_server_params"):
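
Stripped of the Autogen specifics, the reverted wrap__execute_tool_call follows one reusable shape: enter a FunctionTrace by hand rather than via a with block so its duration is still readable after exit, await the wrapped coroutine, and record an LlmTool custom event on both the success and error paths. A minimal sketch of just that shape, with the event fields trimmed down (traced_tool_call is illustrative, not agent code):

import sys
import uuid

from newrelic.api.function_trace import FunctionTrace
from newrelic.api.transaction import current_transaction


async def traced_tool_call(wrapped, tool_name, *args, **kwargs):
    transaction = current_transaction()
    if not transaction:
        # No active transaction means there is nowhere to record events.
        return await wrapped(*args, **kwargs)

    event = {"id": str(uuid.uuid4()), "name": tool_name, "vendor": "autogen"}
    ft = FunctionTrace(name=f"tool/{tool_name}", group="Llm/tool/Autogen")
    ft.__enter__()  # Entered by hand so ft.duration is readable after exit.
    try:
        result = await wrapped(*args, **kwargs)
    except Exception:
        ft.notice_error(attributes={"tool_id": event["id"]})
        ft.__exit__(*sys.exc_info())
        event.update({"duration": ft.duration * 1000, "error": True})
        transaction.record_custom_event("LlmTool", event)
        raise
    ft.__exit__(None, None, None)
    event.update({"duration": ft.duration * 1000})
    transaction.record_custom_event("LlmTool", event)
    return result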

tests/mlmodel_autogen/conftest.py

Lines changed: 1 addition & 149 deletions
@@ -12,29 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import pytest
-
 from testing_support.fixture.event_loop import event_loop as loop
 from testing_support.fixtures import collector_agent_registration_fixture, collector_available_fixture
 
-import json
-import pytest
-
-from autogen_agentchat.agents import AssistantAgent
-from autogen_agentchat.base import TaskResult
-from autogen_agentchat.teams import RoundRobinGroupChat
-from autogen_core import ComponentModel, FunctionCall, Image
-from autogen_core.models import CreateResult, RequestUsage
-from autogen_core.models._model_client import ModelFamily
-from autogen_ext.models.replay import ReplayChatCompletionClient
-
-from newrelic.common.object_names import callable_name
-
-from pydantic import BaseModel, ValidationError
-
-
 _default_settings = {
-    "package_reporting.enabled": False,  # Turn off package reporting for testing as it causes slowdowns.
+    "package_reporting.enabled": False,
     "transaction_tracer.explain_threshold": 0.0,
     "transaction_tracer.transaction_threshold": 0.0,
     "transaction_tracer.stack_trace_threshold": 0.0,
@@ -46,133 +28,3 @@
 collector_agent_registration = collector_agent_registration_fixture(
     app_name="Python Agent Test (mlmodel_autogen)", default_settings=_default_settings
 )
-
-
-@pytest.fixture
-def single_tool_model_client():
-    model_client = ReplayChatCompletionClient(
-        [
-            CreateResult(
-                finish_reason="function_calls",
-                content=[FunctionCall(id="1", arguments=json.dumps({"input": "Hello"}), name="add_exclamation")],
-                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
-                cached=False,
-            ),
-            "Hello",
-            "TERMINATE",
-        ],
-        model_info={
-            "function_calling": True,
-            "vision": True,
-            "json_output": True,
-            "family": "gpt-4.1-nano",
-            "structured_output": True,
-        },
-    )
-    return model_client
-
-
-@pytest.fixture
-def single_tool_model_client_error():
-    model_client = ReplayChatCompletionClient(
-        [
-            CreateResult(
-                finish_reason="function_calls",
-                # Set arguments to an invalid type to trigger error in tool
-                content=[FunctionCall(id="1", arguments=12, name="add_exclamation")],
-                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
-                cached=False,
-            ),
-            "Hello",
-            "TERMINATE",
-        ],
-        model_info={
-            "function_calling": True,
-            "vision": True,
-            "json_output": True,
-            "family": "gpt-4.1-nano",
-            "structured_output": True,
-        },
-    )
-    return model_client
-
-
-@pytest.fixture
-def multi_tool_model_client():
-    model_client = ReplayChatCompletionClient(
-        chat_completions=[
-            CreateResult(
-                finish_reason="function_calls",
-                content=[FunctionCall(id="1", name="add_exclamation", arguments=json.dumps({"input": "Hello"}))],
-                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
-                cached=False,
-            ),
-            CreateResult(
-                finish_reason="function_calls",
-                content=[FunctionCall(id="2", name="add_exclamation", arguments=json.dumps({"input": "Goodbye"}))],
-                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
-                cached=False,
-            ),
-            CreateResult(
-                finish_reason="function_calls",
-                content=[FunctionCall(id="3", name="compute_sum", arguments=json.dumps({"a": 5, "b": 3}))],
-                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
-                cached=False,
-            ),
-            CreateResult(
-                finish_reason="function_calls",
-                content=[FunctionCall(id="4", name="compute_sum", arguments=json.dumps({"a": 123, "b": 2}))],
-                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
-                cached=False,
-            ),
-        ],
-        model_info={
-            "family": "gpt-4.1-nano",
-            "function_calling": True,
-            "json_output": True,
-            "vision": True,
-            "structured_output": True,
-        },
-    )
-    return model_client
-
-
-@pytest.fixture
-def multi_tool_model_client_error():
-    model_client = ReplayChatCompletionClient(
-        chat_completions=[
-            CreateResult(
-                finish_reason="function_calls",
-                content=[FunctionCall(id="1", name="add_exclamation", arguments=json.dumps({"input": "Hello"}))],
-                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
-                cached=False,
-            ),
-            CreateResult(
-                finish_reason="function_calls",
-                content=[FunctionCall(id="2", name="add_exclamation", arguments=json.dumps({"input": "Goodbye"}))],
-                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
-                cached=False,
-            ),
-            CreateResult(
-                finish_reason="function_calls",
-                content=[FunctionCall(id="3", name="compute_sum", arguments=json.dumps({"a": 5, "b": 3}))],
-                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
-                cached=False,
-            ),
-            CreateResult(
-                finish_reason="function_calls",
-                # Set arguments to an invalid type to trigger error in tool
-                content=[FunctionCall(id="4", name="compute_sum", arguments=12)],
-                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
-                cached=False,
-            ),
-        ],
-        model_info={
-            "family": "gpt-4.1-nano",
-            "function_calling": True,
-            "json_output": True,
-            "vision": True,
-            "structured_output": True,
-        },
-    )
-    return model_client
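
For orientation, fixtures like these are consumed by driving an AssistantAgent against the replay client, which returns its canned CreateResult objects in order so the function-call path runs deterministically and offline. A hedged sketch of such a test (the tool body and assertion are illustrative; only the add_exclamation name is fixed by the canned FunctionCall entries above):

from autogen_agentchat.agents import AssistantAgent


def add_exclamation(input: str) -> str:
    # Toy tool whose name matches the canned FunctionCall in the fixtures.
    return f"{input}!"


def test_assistant_agent_single_tool(loop, single_tool_model_client):
    agent = AssistantAgent(
        "assistant",
        model_client=single_tool_model_client,
        tools=[add_exclamation],
    )
    # The replay client emits the canned function call first, so the agent
    # executes add_exclamation("Hello") before returning its final answer.
    result = loop.run_until_complete(agent.run(task="Say hello"))
    assert result.messages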
