@@ -360,25 +360,33 @@ def llm_gen_ai_choice(params: LLMAttributesParams) -> ExtractorResponse:
 
 
 LLM_ATTRIBUTES = {
+    # ===== request attributes =====
     "gen_ai.request.model": llm_gen_ai_request_model,
     "gen_ai.request.type": llm_gen_ai_request_type,
-    "gen_ai.response.model": llm_gen_ai_response_model,
     "gen_ai.request.max_tokens": llm_gen_ai_request_max_tokens,
     "gen_ai.request.temperature": llm_gen_ai_request_temperature,
     "gen_ai.request.top_p": llm_gen_ai_request_top_p,
-    "gen_ai.prompt": llm_gen_ai_prompt,
-    "gen_ai.completion": llm_gen_ai_completion,
+    # ===== response attributes =====
+    "gen_ai.response.model": llm_gen_ai_response_model,
     "gen_ai.response.stop_reason": llm_gen_ai_response_stop_reason,
     "gen_ai.response.finish_reason": llm_gen_ai_response_finish_reason,
-    "gen_ai.usage.input_tokens": llm_gen_ai_usage_input_tokens,
-    "gen_ai.usage.output_tokens": llm_gen_ai_usage_output_tokens,
-    "gen_ai.usage.total_tokens": llm_gen_ai_usage_total_tokens,
-    "gen_ai.usage.cache_creation_input_tokens": llm_gen_ai_usage_cache_creation_input_tokens,
-    "gen_ai.usage.cache_read_input_tokens": llm_gen_ai_usage_cache_read_input_tokens,
+    # ===== streaming =====
     "gen_ai.is_streaming": llm_gen_ai_is_streaming,
+    # ===== span type =====
     "gen_ai.operation.name": llm_gen_ai_operation_name,
+    # ===== inputs and outputs =====
+    # events
     "gen_ai.system.message": llm_gen_ai_system_message,
     "gen_ai.user.message": llm_gen_ai_user_message,
     "gen_ai.assistant.message": llm_gen_ai_assistant_message,
     "gen_ai.choice": llm_gen_ai_choice,
+    # attributes
+    "gen_ai.prompt": llm_gen_ai_prompt,
+    "gen_ai.completion": llm_gen_ai_completion,
+    # ===== usage =====
+    "gen_ai.usage.input_tokens": llm_gen_ai_usage_input_tokens,
+    "gen_ai.usage.output_tokens": llm_gen_ai_usage_output_tokens,
+    "gen_ai.usage.total_tokens": llm_gen_ai_usage_total_tokens,
+    "gen_ai.usage.cache_creation_input_tokens": llm_gen_ai_usage_cache_creation_input_tokens,
+    "gen_ai.usage.cache_read_input_tokens": llm_gen_ai_usage_cache_read_input_tokens,
 }
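
Since every extractor in `LLM_ATTRIBUTES` shares the signature seen in the hunk header (`LLMAttributesParams` in, `ExtractorResponse` out), the table can be driven by a single dispatch loop. A minimal sketch, assuming this module's types; the `run_extractors` helper is hypothetical and not part of this commit:

```python
# Hypothetical consumer of the LLM_ATTRIBUTES dispatch table (not in this
# commit). Assumes every registered extractor accepts an LLMAttributesParams
# and returns an ExtractorResponse, per the signature shown above.
def run_extractors(params: "LLMAttributesParams") -> dict[str, "ExtractorResponse"]:
    """Invoke each registered extractor once against the same span params."""
    return {
        attribute: extract(params)
        for attribute, extract in LLM_ATTRIBUTES.items()
    }
```

Because Python dicts preserve insertion order, the grouped layout above also fixes the order in which extractors fire.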