@@ -58,8 +58,68 @@ def llm_gen_ai_completion(params: LLMAttributesParams) -> list[dict] | None:
 
 
 def llm_gen_ai_response_stop_reason(params: LLMAttributesParams) -> str | None:
-    return params.llm_response.stop_reason
+    return "<no_stop_reason_provided>"
 
 
 def llm_gen_ai_response_finish_reason(params: LLMAttributesParams) -> str | None:
-    return params.llm_response.finish_reason
+    # TODO: update to google-adk v1.12.0
+    return None
+
+
+def llm_gen_ai_usage_input_tokens(params: LLMAttributesParams) -> int | None:
+    if params.llm_response.usage_metadata:
+        return params.llm_response.usage_metadata.prompt_token_count
+    return None
+
+
+def llm_gen_ai_usage_output_tokens(params: LLMAttributesParams) -> int | None:
+    if params.llm_response.usage_metadata:
+        return params.llm_response.usage_metadata.candidates_token_count
+    return None
+
+
+def llm_gen_ai_usage_total_tokens(params: LLMAttributesParams) -> int | None:
+    if params.llm_response.usage_metadata:
+        return params.llm_response.usage_metadata.total_token_count
+    return None
+
+
+def llm_gen_ai_usage_cache_creation_input_tokens(
+    params: LLMAttributesParams,
+) -> int | None:
+    # Not yet mapped to a usage_metadata field; always unset for now.
+    # return params.llm_response.usage_metadata.cached_content_token_count
+    return None
+
+
+def llm_gen_ai_usage_cache_read_input_tokens(params: LLMAttributesParams) -> int | None:
+    # Not yet mapped to a usage_metadata field; always unset for now.
+    # return params.llm_response.usage_metadata.prompt_token_count
+    return None
+
+
+def llm_gen_ai_is_streaming(params: LLMAttributesParams) -> bool | None:
+    # return params.llm_request.stream
+    return None
+
+
+LLM_ATTRIBUTES = {
+    "gen_ai.request.model": llm_gen_ai_request_model,
+    "gen_ai.request.type": llm_gen_ai_request_type,
+    "gen_ai.response.model": llm_gen_ai_response_model,
+    "gen_ai.request.max_tokens": llm_gen_ai_request_max_tokens,
+    "gen_ai.request.temperature": llm_gen_ai_request_temperature,
+    "gen_ai.request.top_p": llm_gen_ai_request_top_p,
+    "gen_ai.prompt": llm_gen_ai_prompt,
+    "gen_ai.completion": llm_gen_ai_completion,
+    "gen_ai.response.stop_reason": llm_gen_ai_response_stop_reason,
+    "gen_ai.response.finish_reason": llm_gen_ai_response_finish_reason,
+    "gen_ai.usage.input_tokens": llm_gen_ai_usage_input_tokens,
+    "gen_ai.usage.output_tokens": llm_gen_ai_usage_output_tokens,
+    "gen_ai.usage.total_tokens": llm_gen_ai_usage_total_tokens,
+    "gen_ai.usage.cache_creation_input_tokens": llm_gen_ai_usage_cache_creation_input_tokens,
+    "gen_ai.usage.cache_read_input_tokens": llm_gen_ai_usage_cache_read_input_tokens,
+    "gen_ai.is_streaming": llm_gen_ai_is_streaming,
+}
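
For context, a minimal sketch of how this attribute map might be consumed when recording a span. set_llm_attributes is a hypothetical helper, not part of this diff; it assumes LLM_ATTRIBUTES and LLMAttributesParams as defined above, plus the standard OpenTelemetry Span.set_attribute API:

import json

from opentelemetry import trace


def set_llm_attributes(span: trace.Span, params: LLMAttributesParams) -> None:
    # Run every extractor; attributes whose extractor returns None
    # (the stubbed finish_reason, cache counters, and is_streaming above)
    # are simply omitted from the span.
    for attribute, extractor in LLM_ATTRIBUTES.items():
        value = extractor(params)
        if value is None:
            continue
        # OpenTelemetry attribute values must be primitives or sequences of
        # primitives, so structured values such as gen_ai.prompt's
        # list[dict] are JSON-encoded before being recorded.
        if isinstance(value, (list, dict)):
            value = json.dumps(value)
        span.set_attribute(attribute, value)

Skipping None rather than recording it keeps the stubbed attributes invisible to tracing backends until the corresponding google-adk fields are wired up.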