Skip to content

Commit 454dd77

Browse files
Update OpenAI instrumentation to support autogen compatibility (#1445)
* Add instrumentation for autogen MCP tool adapter. (#1409) * Add instrumentation for MCP tool adapter. * [MegaLinter] Apply linters fixes * Add autogen agent instrumentation. * Revert "Add autogen agent instrumentation." This reverts commit a26766a. * Add autogen agent instrumentation. * Revert "Add autogen agent instrumentation." This reverts commit a26766a. * Autogen Agent Instrumentation (#1412) * Add autogen agent instrumentation. * Address review feedback. * [MegaLinter] Apply linters fixes --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> * Save linking metadata on generator proxy. * Add comments to changes in instrumentation. --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
1 parent 398bec4 commit 454dd77

File tree

1 file changed

+13
-3
lines changed

1 file changed

+13
-3
lines changed

newrelic/hooks/mlmodel_openai.py

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -442,6 +442,7 @@ def _handle_completion_success(transaction, linking_metadata, completion_id, kwa
442442
# The function trace will be exited when in the final iteration of the response
443443
# generator.
444444
return_val._nr_ft = ft
445+
return_val._nr_metadata = linking_metadata
445446
return_val._nr_openai_attrs = getattr(return_val, "_nr_openai_attrs", {})
446447
return_val._nr_openai_attrs["messages"] = kwargs.get("messages", [])
447448
return_val._nr_openai_attrs["temperature"] = kwargs.get("temperature")
@@ -492,10 +493,14 @@ def _record_completion_success(transaction, linking_metadata, completion_id, kwa
492493
response_model = kwargs.get("response.model")
493494
response_id = kwargs.get("id")
494495
output_message_list = []
495-
finish_reason = None
496+
finish_reason = kwargs.get("finish_reason")
496497
if "content" in kwargs:
497498
output_message_list = [{"content": kwargs.get("content"), "role": kwargs.get("role")}]
498-
finish_reason = kwargs.get("finish_reason")
499+
# When tools are involved, the content key may hold an empty string which we do not want to report
500+
# In this case, the content we are interested in capturing will already be covered in the input_message_list
501+
# We empty out the output_message_list so that we do not report an empty message
502+
if "tool_call" in finish_reason and not kwargs.get("content"):
503+
output_message_list = []
499504
request_model = kwargs.get("model") or kwargs.get("engine")
500505

501506
request_id = response_headers.get("x-request-id")
@@ -765,7 +770,10 @@ def _record_stream_chunk(self, return_val):
765770

766771
def _record_events_on_stop_iteration(self, transaction):
767772
if hasattr(self, "_nr_ft"):
768-
linking_metadata = get_trace_linking_metadata()
773+
# We first check for our saved linking metadata before making a new call to get_trace_linking_metadata
774+
# Directly calling get_trace_linking_metadata() causes the incorrect span ID to be captured and associated with the LLM call
775+
# This leads to incorrect linking of the LLM call in the UI
776+
linking_metadata = self._nr_metadata or get_trace_linking_metadata()
769777
self._nr_ft.__exit__(None, None, None)
770778
try:
771779
openai_attrs = getattr(self, "_nr_openai_attrs", {})
@@ -872,6 +880,8 @@ def set_attrs_on_generator_proxy(proxy, instance):
872880
proxy._nr_response_headers = instance._nr_response_headers
873881
if hasattr(instance, "_nr_openai_attrs"):
874882
proxy._nr_openai_attrs = instance._nr_openai_attrs
883+
if hasattr(instance, "_nr_metadata"):
884+
proxy._nr_metadata = instance._nr_metadata
875885

876886

877887
def wrap_engine_api_resource_create_sync(wrapped, instance, args, kwargs):

0 commit comments

Comments (0)