@@ -550,13 +550,35 @@ def openai_get_input_messages_from_response_input(
     Returns:
         - A list of processed messages
     """
+    processed, _ = _openai_parse_input_response_messages(messages)
+    return processed
+
+
+def _openai_parse_input_response_messages(
+    messages: Optional[Union[str, List[Dict[str, Any]]]], system_instructions: Optional[str] = None
+) -> Tuple[List[Dict[str, Any]], List[str]]:
+    """
+    Parses input messages from the openai responses api into a list of processed messages
+    and a list of tool call IDs.
+
+    Args:
+        messages: A list of input messages
+
+    Returns:
+        - A list of processed messages
+        - A list of tool call IDs
+    """
     processed: List[Dict[str, Any]] = []
+    tool_call_ids: List[str] = []
+
+    if system_instructions:
+        processed.append({"role": "system", "content": system_instructions})
 
     if not messages:
-        return processed
+        return processed, tool_call_ids
 
     if isinstance(messages, str):
-        return [{"role": "user", "content": messages}]
+        return [{"role": "user", "content": messages}], tool_call_ids
 
     for item in messages:
         processed_item: Dict[str, Union[str, List[ToolCall], List[ToolResult]]] = {}
@@ -574,7 +596,7 @@ def openai_get_input_messages_from_response_input(
                 processed_item["role"] = item["role"]
         elif "call_id" in item and ("arguments" in item or "input" in item):
             # Process `ResponseFunctionToolCallParam` or ResponseCustomToolCallParam type from input messages
-            arguments_str = item.get("arguments", "{}") or item.get("input", "{}")
+            arguments_str = item.get("arguments", "") or item.get("input", OAI_HANDOFF_TOOL_ARG)
             arguments = safe_load_json(arguments_str)
 
             tool_call_info = ToolCall(
@@ -585,7 +607,7 @@ def openai_get_input_messages_from_response_input(
             )
             processed_item.update(
                 {
-                    "role": "user",
+                    "role": "assistant",
                     "tool_calls": [tool_call_info],
                 }
             )
@@ -607,10 +629,11 @@ def openai_get_input_messages_from_response_input(
                     "tool_results": [tool_result_info],
                 }
             )
+            tool_call_ids.append(item["call_id"])
         if processed_item:
             processed.append(processed_item)
 
-    return processed
+    return processed, tool_call_ids
 
 
 def openai_get_output_messages_from_response(response: Optional[Any]) -> List[Dict[str, Any]]:
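For reference, a minimal sketch of how the new input-parsing helper behaves on a Responses-API-style payload. The sample items and the expected results in the comments are illustrative assumptions based on the branches shown above, not part of the change itself:

```python
# Hypothetical usage of _openai_parse_input_response_messages (sketch only).
# The payload below is made up; the exact ToolCall/ToolResult shapes come from
# ddtrace internals and are only summarized in the trailing comments.
input_items = [
    {"role": "user", "content": "What is the weather in Paris?"},
    {"type": "function_call", "call_id": "call_1", "name": "get_weather", "arguments": '{"city": "Paris"}'},
    {"type": "function_call_output", "call_id": "call_1", "output": '{"temp_c": 21}'},
]

processed, tool_call_ids = _openai_parse_input_response_messages(
    input_items, system_instructions="You are a helpful assistant."
)

# processed[0] -> {"role": "system", "content": "You are a helpful assistant."}
# processed[1] -> {"role": "user", "content": "What is the weather in Paris?"}
# processed[2] -> {"role": "assistant", "tool_calls": [ToolCall(tool_id="call_1", ...)]}
# processed[3] -> a message carrying "tool_results" for call_1
# tool_call_ids -> ["call_1"]
```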
@@ -630,15 +653,33 @@ def openai_get_output_messages_from_response(response: Optional[Any]) -> List[Di
     if not messages:
         return []
 
+    processed_messages, _ = _openai_parse_output_response_messages(messages)
+
+    return processed_messages
+
+
+def _openai_parse_output_response_messages(messages: List[Any]) -> Tuple[List[Dict[str, Any]], List[ToolCall]]:
+    """
+    Parses output messages from the openai responses api into a list of processed messages
+    and a list of tool call outputs.
+
+    Args:
+        messages: A list of output messages
+
+    Returns:
+        - A list of processed messages
+        - A list of tool call outputs
+    """
     processed: List[Dict[str, Any]] = []
+    tool_call_outputs: List[ToolCall] = []
 
     for item in messages:
         message = {}
         message_type = _get_attr(item, "type", "")
 
         if message_type == "message":
             text = ""
-            for content in _get_attr(item, "content", []):
+            for content in _get_attr(item, "content", []) or []:
                 text += str(_get_attr(content, "text", "") or "")
                 text += str(_get_attr(content, "refusal", "") or "")
             message.update({"role": _get_attr(item, "role", "assistant"), "content": text})
@@ -656,26 +697,29 @@ def openai_get_output_messages_from_response(response: Optional[Any]) -> List[Di
                 }
             )
         elif message_type == "function_call" or message_type == "custom_tool_call":
-            arguments = _get_attr(item, "input", "") or _get_attr(item, "arguments", "{}")
-            arguments = safe_load_json(arguments)
+            call_id = _get_attr(item, "call_id", "")
+            name = _get_attr(item, "name", "")
+            raw_arguments = _get_attr(item, "input", "") or _get_attr(item, "arguments", OAI_HANDOFF_TOOL_ARG)
+            arguments = safe_load_json(raw_arguments)
             tool_call_info = ToolCall(
-                tool_id=_get_attr(item, "call_id", ""),
+                tool_id=call_id,
                 arguments=arguments,
-                name=_get_attr(item, "name", ""),
+                name=name,
                 type=_get_attr(item, "type", "function"),
             )
+            tool_call_outputs.append(tool_call_info)
             message.update(
                 {
                     "tool_calls": [tool_call_info],
                     "role": "assistant",
                 }
             )
         else:
-            message.update({"role": "assistant", "content": "Unsupported content type: {}".format(message_type)})
+            message.update({"content": str(item), "role": "assistant"})
 
         processed.append(message)
 
-    return processed
+    return processed, tool_call_outputs
 
 
 def openai_get_metadata_from_response(
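And a small, assumed example of the output-parsing helper on a single tool-call item; the SimpleNamespace stand-in only mimics the SDK's response objects for illustration and is not how the tracer actually receives them:

```python
# Sketch only: a stand-in for an SDK function-call output item.
from types import SimpleNamespace

output_items = [
    SimpleNamespace(type="function_call", call_id="call_1", name="get_weather", arguments='{"city": "Paris"}')
]

processed, tool_call_outputs = _openai_parse_output_response_messages(output_items)

# processed[0]      -> {"tool_calls": [ToolCall(tool_id="call_1", ...)], "role": "assistant"}
# tool_call_outputs -> [the same ToolCall, collected so the caller can link spans]
```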
@@ -1071,126 +1115,26 @@ def llmobs_input_messages(self) -> Tuple[List[Dict[str, Any]], List[str]]:
             - A list of processed messages
             - A list of tool call IDs for span linking purposes
         """
-        messages = self.input
-        processed: List[Dict[str, Any]] = []
-        tool_call_ids: List[str] = []
-
-        if self.response_system_instructions:
-            processed.append({"role": "system", "content": self.response_system_instructions})
-
-        if not messages:
-            return processed, tool_call_ids
-
-        if isinstance(messages, str):
-            return [{"content": messages, "role": "user"}], tool_call_ids
-
-        for item in messages:
-            processed_item: Dict[str, Union[str, List[Dict[str, str]]]] = {}
-            # Handle regular message
-            if "content" in item and "role" in item:
-                processed_item_content = ""
-                if isinstance(item["content"], list):
-                    for content in item["content"]:
-                        processed_item_content += content.get("text", "")
-                        processed_item_content += content.get("refusal", "")
-                else:
-                    processed_item_content = item["content"]
-                if processed_item_content:
-                    processed_item["content"] = processed_item_content
-                    processed_item["role"] = item["role"]
-            elif "call_id" in item and "arguments" in item:
-                """
-                Process `ResponseFunctionToolCallParam` type from input messages
-                """
-                try:
-                    arguments = json.loads(item["arguments"])
-                except json.JSONDecodeError:
-                    arguments = item["arguments"]
-                processed_item["tool_calls"] = [
-                    {
-                        "tool_id": item["call_id"],
-                        "arguments": arguments,
-                        "name": item.get("name", ""),
-                        "type": item.get("type", "function_call"),
-                    }
-                ]
-            elif "call_id" in item and "output" in item:
-                """
-                Process `FunctionCallOutput` type from input messages
-                """
-                output = item["output"]
-
-                if isinstance(output, str):
-                    try:
-                        output = json.loads(output)
-                    except json.JSONDecodeError:
-                        output = {"output": output}
-                tool_call_ids.append(item["call_id"])
-                processed_item["role"] = "tool"
-                processed_item["content"] = item["output"]
-                processed_item["tool_id"] = item["call_id"]
-            if processed_item:
-                processed.append(processed_item)
+        return _openai_parse_input_response_messages(self.input, self.response_system_instructions)
 
-        return processed, tool_call_ids
-
-    def llmobs_output_messages(self) -> Tuple[List[Dict[str, Any]], List[Tuple[str, str, str]]]:
+    def llmobs_output_messages(self) -> Tuple[List[Dict[str, Any]], List[ToolCall]]:
         """Returns processed output messages for LLM Obs LLM spans.
 
         Returns:
             - A list of processed messages
-            - A list of tool call data (name, id, args) for span linking purposes
+            - A list of tool calls for span linking purposes
         """
         if not self.response or not self.response.output:
             return [], []
 
         messages: List[Any] = self.response.output
-        processed: List[Dict[str, Any]] = []
-        tool_call_outputs: List[Tuple[str, str, str]] = []
         if not messages:
-            return processed, tool_call_outputs
+            return [], []
 
         if not isinstance(messages, list):
             messages = [messages]
 
-        for item in messages:
-            message = {}
-            # Handle content-based messages
-            if hasattr(item, "content"):
-                text = ""
-                for content in item.content:
-                    if hasattr(content, "text") or hasattr(content, "refusal"):
-                        text += getattr(content, "text", "")
-                        text += getattr(content, "refusal", "")
-                message.update({"role": getattr(item, "role", "assistant"), "content": text})
-            # Handle tool calls
-            elif hasattr(item, "call_id") and hasattr(item, "arguments"):
-                tool_call_outputs.append(
-                    (
-                        item.call_id,
-                        getattr(item, "name", ""),
-                        item.arguments if item.arguments else OAI_HANDOFF_TOOL_ARG,
-                    )
-                )
-                message.update(
-                    {
-                        "tool_calls": [
-                            {
-                                "tool_id": item.call_id,
-                                "arguments": (
-                                    json.loads(item.arguments) if isinstance(item.arguments, str) else item.arguments
-                                ),
-                                "name": getattr(item, "name", ""),
-                                "type": getattr(item, "type", "function"),
-                            }
-                        ]
-                    }
-                )
-            else:
-                message.update({"content": str(item)})
-            processed.append(message)
-
-        return processed, tool_call_outputs
+        return _openai_parse_output_response_messages(messages)
 
     def llmobs_trace_input(self) -> Optional[str]:
         """Converts Response span data to an input value for top level trace.