@@ -442,6 +442,7 @@ def _handle_completion_success(transaction, linking_metadata, completion_id, kwa
442442 # The function trace will be exited when in the final iteration of the response
443443 # generator.
444444 return_val ._nr_ft = ft
445+ return_val ._nr_metadata = linking_metadata
445446 return_val ._nr_openai_attrs = getattr (return_val , "_nr_openai_attrs" , {})
446447 return_val ._nr_openai_attrs ["messages" ] = kwargs .get ("messages" , [])
447448 return_val ._nr_openai_attrs ["temperature" ] = kwargs .get ("temperature" )
@@ -492,10 +493,14 @@ def _record_completion_success(transaction, linking_metadata, completion_id, kwa
492493 response_model = kwargs .get ("response.model" )
493494 response_id = kwargs .get ("id" )
494495 output_message_list = []
495- finish_reason = None
496+ finish_reason = kwargs . get ( "finish_reason" )
496497 if "content" in kwargs :
497498 output_message_list = [{"content" : kwargs .get ("content" ), "role" : kwargs .get ("role" )}]
498- finish_reason = kwargs .get ("finish_reason" )
 499+ # When tools are involved, the "content" key may hold an empty string which we do not want to report.
 500+ # In that case the content of interest is already covered by the input_message_list, so we empty out
 501+ # output_message_list to avoid reporting an empty message. NOTE(review): finish_reason may be None here — verify before the "in" check.
502+ if "tool_call" in finish_reason and not kwargs .get ("content" ):
503+ output_message_list = []
499504 request_model = kwargs .get ("model" ) or kwargs .get ("engine" )
500505
501506 request_id = response_headers .get ("x-request-id" )
@@ -765,7 +770,10 @@ def _record_stream_chunk(self, return_val):
765770
766771def _record_events_on_stop_iteration (self , transaction ):
767772 if hasattr (self , "_nr_ft" ):
768- linking_metadata = get_trace_linking_metadata ()
 773+ # Prefer the linking metadata saved when the call started over a fresh get_trace_linking_metadata() call.
 774+ # Calling get_trace_linking_metadata() at this point captures the wrong span ID for the LLM call,
 775+ # which leads to incorrect linking of the LLM call in the UI.
776+ linking_metadata = self ._nr_metadata or get_trace_linking_metadata ()
769777 self ._nr_ft .__exit__ (None , None , None )
770778 try :
771779 openai_attrs = getattr (self , "_nr_openai_attrs" , {})
@@ -872,6 +880,8 @@ def set_attrs_on_generator_proxy(proxy, instance):
872880 proxy ._nr_response_headers = instance ._nr_response_headers
873881 if hasattr (instance , "_nr_openai_attrs" ):
874882 proxy ._nr_openai_attrs = instance ._nr_openai_attrs
883+ if hasattr (instance , "_nr_metadata" ):
884+ proxy ._nr_metadata = instance ._nr_metadata
875885
876886
877887def wrap_engine_api_resource_create_sync (wrapped , instance , args , kwargs ):
0 commit comments