@@ -151,9 +151,9 @@ def _extract_llm_attributes(llm_request_dict: dict, llm_response: Any) -> dict:
     if "config" in llm_request_dict:
         config = llm_request_dict["config"]

-        # System instruction
-        if "system_instruction" in config:
-            attributes[SpanAttributes.LLM_REQUEST_SYSTEM_INSTRUCTION] = config["system_instruction"]
+        # System instruction - commented out, now handled as a system role message
+        # if "system_instruction" in config:
+        #     attributes[SpanAttributes.LLM_REQUEST_SYSTEM_INSTRUCTION] = config["system_instruction"]

         # Temperature
         if "temperature" in config:
@@ -192,10 +192,68 @@ def _extract_llm_attributes(llm_request_dict: dict, llm_response: Any) -> dict:
                 attributes[f"gen_ai.request.tools.{j}.name"] = func.get("name", "")
                 attributes[f"gen_ai.request.tools.{j}.description"] = func.get("description", "")

-    # Messages
+    # Messages - handle system instruction and regular contents
+    message_index = 0
+
+    # First, add system instruction as a system role message if present
+    # TODO: This is not Chat Completions format but doing this for frontend rendering consistency
+    if "config" in llm_request_dict and "system_instruction" in llm_request_dict["config"]:
+        system_instruction = llm_request_dict["config"]["system_instruction"]
+        attributes[MessageAttributes.PROMPT_ROLE.format(i=message_index)] = "system"
+        attributes[MessageAttributes.PROMPT_CONTENT.format(i=message_index)] = system_instruction
+        message_index += 1
+
+    # Then add regular contents with proper indexing
     if "contents" in llm_request_dict:
-        msg_attrs = _extract_messages_from_contents(llm_request_dict["contents"])
-        attributes.update(msg_attrs)
+        for content in llm_request_dict["contents"]:
+            # Get role and normalize it
+            raw_role = content.get("role", "user")
+
+            # Hardcode role mapping for consistency
+            if raw_role == "model":
+                role = "assistant"
+            elif raw_role == "user":
+                role = "user"
+            elif raw_role == "system":
+                role = "system"
+            else:
+                role = raw_role  # Keep original if not recognized
+
+            parts = content.get("parts", [])
+
+            # Set role
+            attributes[MessageAttributes.PROMPT_ROLE.format(i=message_index)] = role
+
+            # Extract content from parts
+            text_parts = []
+            for part in parts:
+                if "text" in part:
+                    text_parts.append(part["text"])
+                elif "function_call" in part:
+                    # Function calls in prompts are typically from the model's previous responses
+                    func_call = part["function_call"]
+                    # Store as a generic attribute since MessageAttributes doesn't have prompt tool calls
+                    attributes[f"gen_ai.prompt.{message_index}.function_call.name"] = func_call.get("name", "")
+                    attributes[f"gen_ai.prompt.{message_index}.function_call.args"] = json.dumps(
+                        func_call.get("args", {})
+                    )
+                    if "id" in func_call:
+                        attributes[f"gen_ai.prompt.{message_index}.function_call.id"] = func_call["id"]
+                elif "function_response" in part:
+                    # Function responses are typically user messages with tool results
+                    func_resp = part["function_response"]
+                    attributes[f"gen_ai.prompt.{message_index}.function_response.name"] = func_resp.get("name", "")
+                    attributes[f"gen_ai.prompt.{message_index}.function_response.result"] = json.dumps(
+                        func_resp.get("response", {})
+                    )
+                    if "id" in func_resp:
+                        attributes[f"gen_ai.prompt.{message_index}.function_response.id"] = func_resp["id"]
+
+            # Combine text parts
+            if text_parts:
+                attributes[MessageAttributes.PROMPT_CONTENT.format(i=message_index)] = "\n".join(text_parts)
+
+            message_index += 1

     # Response
     if llm_response:
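For illustration, a minimal sketch of how the new flattening behaves on a Gemini-style request, assuming MessageAttributes.PROMPT_ROLE and PROMPT_CONTENT format to the conventional "gen_ai.prompt.{i}.role" and "gen_ai.prompt.{i}.content" keys; the sample request and its values are hypothetical, not from the commit:

# Hypothetical input: system_instruction in config plus a three-turn history
llm_request_dict = {
    "config": {"system_instruction": "You are a helpful assistant."},
    "contents": [
        {"role": "user", "parts": [{"text": "What's the weather in Paris?"}]},
        {"role": "model", "parts": [{"function_call": {"name": "get_weather", "args": {"city": "Paris"}}}]},
        {"role": "user", "parts": [{"function_response": {"name": "get_weather", "response": {"temp_c": 21}}}]},
    ],
}

# Expected flattened attributes (abridged; key names assume the usual gen_ai.prompt.* layout):
# gen_ai.prompt.0.role                     = "system"        <- injected from config["system_instruction"]
# gen_ai.prompt.0.content                  = "You are a helpful assistant."
# gen_ai.prompt.1.role                     = "user"
# gen_ai.prompt.1.content                  = "What's the weather in Paris?"
# gen_ai.prompt.2.role                     = "assistant"     <- "model" normalized to "assistant"
# gen_ai.prompt.2.function_call.name       = "get_weather"
# gen_ai.prompt.2.function_call.args       = '{"city": "Paris"}'
# gen_ai.prompt.3.role                     = "user"
# gen_ai.prompt.3.function_response.name   = "get_weather"
# gen_ai.prompt.3.function_response.result = '{"temp_c": 21}'
#
# Note that messages carrying only a function_call or function_response get no
# gen_ai.prompt.{i}.content entry, since text_parts stays empty for them.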