@@ -334,7 +334,7 @@ def _extract_llama_attributes(self, attributes, request_body):
             attributes, GEN_AI_REQUEST_TOP_P, request_body.get("top_p")
         )
         # request for meta llama models does not contain stop_sequences field
-
+
     def _extract_mistral_attributes(self, attributes, request_body):
         prompt = request_body.get("prompt")
         if prompt:
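
The extractors above share a single pattern: read an optional field from the decoded request body and record it as a span attribute only when it is present. A minimal standalone sketch of that pattern (the `_set_if_not_none` helper and the example body are assumptions inferred from the context lines, not copied from the upstream source):

```python
# Sketch of the request-attribute extraction pattern (assumed shape).
GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p"  # GenAI semconv key

def _set_if_not_none(attributes: dict, key: str, value) -> None:
    # Only record attributes that actually appear in the request body.
    if value is not None:
        attributes[key] = value

request_body = {"prompt": "Hello", "top_p": 0.9}  # hypothetical Mistral body
attributes: dict = {}
_set_if_not_none(attributes, GEN_AI_REQUEST_TOP_P, request_body.get("top_p"))
print(attributes)  # {'gen_ai.request.top_p': 0.9}
```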
@@ -382,24 +382,30 @@ def _get_request_messages(self):
         if not messages:
             model_id = self._call_context.params.get(_MODEL_ID_KEY)
             if "amazon.titan" in model_id:
-                if input_text := decoded_body.get("inputText"):
-                    messages = [
-                        {"role": "user", "content": [{"text": input_text}]}
-                    ]
+                messages = self._get_messages_from_input_text(
+                    decoded_body, "inputText"
+                )
             elif "cohere.command-r" in model_id:
                 # chat_history can be converted to messages; for now, just use message
-                if input_text := decoded_body.get("message"):
-                    messages = [
-                        {"role": "user", "content": [{"text": input_text}]}
-                    ]
+                messages = self._get_messages_from_input_text(
+                    decoded_body, "message"
+                )
             elif "cohere.command" in model_id or "meta.llama" in model_id or "mistral.mistral" in model_id:
-                if input_text := decoded_body.get("prompt"):
-                    messages = [
-                        {"role": "user", "content": [{"text": input_text}]}
-                    ]
+                messages = self._get_messages_from_input_text(
+                    decoded_body, "prompt"
+                )

         return system_messages + messages

+    def _get_messages_from_input_text(
+        self, decoded_body: dict[str, Any], input_name: str
+    ):
+        if input_text := decoded_body.get(input_name):
+            return [
+                {"role": "user", "content": [{"text": input_text}]}
+            ]
+        return []
+
     def before_service_call(
         self, span: Span, instrumentor_context: _BotocoreInstrumentorContext
     ):
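
The three provider branches above now funnel through one helper, so the common message shape is built in exactly one place; only the provider-specific field name varies. A standalone sketch of that behavior, reimplemented outside the class for illustration:

```python
from typing import Any

def get_messages_from_input_text(decoded_body: dict[str, Any], input_name: str):
    # Normalize a provider-specific prompt field into the common
    # gen-ai message shape; return [] when the field is absent or empty.
    if input_text := decoded_body.get(input_name):
        return [{"role": "user", "content": [{"text": input_text}]}]
    return []

# Titan uses "inputText", Cohere Command R uses "message", and the
# prompt-based models (Cohere Command, Llama, Mistral) use "prompt":
assert get_messages_from_input_text({"inputText": "hi"}, "inputText") == [
    {"role": "user", "content": [{"text": "hi"}]}
]
assert get_messages_from_input_text({}, "prompt") == []
```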
@@ -827,7 +833,7 @@ def _handle_anthropic_claude_response(
                 token_usage_histogram.record(
                     output_tokens, output_attributes
                 )
-
+
     def _handle_cohere_command_r_response(
         self,
         span: Span,
@@ -843,13 +849,13 @@ def _handle_cohere_command_r_response(
             span.set_attribute(
                 GEN_AI_RESPONSE_FINISH_REASONS, [response_body["finish_reason"]]
             )
-
+
         event_logger = instrumentor_context.event_logger
         choice = _Choice.from_invoke_cohere_command_r(
             response_body, capture_content
         )
         event_logger.emit(choice.to_choice_event())
-
+
     def _handle_cohere_command_response(
         self,
         span: Span,
@@ -867,7 +873,7 @@ def _handle_cohere_command_response(
             span.set_attribute(
                 GEN_AI_RESPONSE_FINISH_REASONS, [generations["finish_reason"]]
             )
-
+
         event_logger = instrumentor_context.event_logger
         choice = _Choice.from_invoke_cohere_command(
             response_body, capture_content
@@ -913,7 +919,7 @@ def _handle_mistral_ai_response(
                 span.set_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, estimate_token_count(outputs["text"]))
             if "stop_reason" in outputs:
                 span.set_attribute(GEN_AI_RESPONSE_FINISH_REASONS, [outputs["stop_reason"]])
-
+
         event_logger = instrumentor_context.event_logger
         choice = _Choice.from_invoke_mistral_mistral(
             response_body, capture_content
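
The Mistral handler falls back to `estimate_token_count` because the raw `InvokeModel` response carries no usage metadata. That helper's implementation is not part of this diff; a plausible sketch, assuming a simple characters-per-token heuristic (the real helper may differ):

```python
import math

def estimate_token_count(message: str) -> int:
    # Rough heuristic: assume ~6 characters per token. Used only when the
    # model response does not report token usage itself.
    return math.ceil(len(message) / 6)

print(estimate_token_count("The quick brown fox"))  # -> 4
```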