1414
# adapted from Google ADK models: google/adk-python, src/google/adk/models/lite_llm.py at commit f1f44675e4a86b75e72cfd838efd8a0399f23e24
1616
17- import json
1817import uuid
1918from typing import Any , Dict , Optional , cast , List , Generator , Tuple , Union
2019
@@ -141,28 +140,26 @@ def ark_field_reorganization(request_data: dict) -> dict:
141140 return request_data
142141
143142
def build_cache_metadata(response_id: str) -> CacheMetadata:
    """Create a new CacheMetadata instance for response-ID tracking.

    The Responses API response ID is stored in ``cache_name`` so it can be
    read back on the next turn as the previous response ID; the remaining
    fields are zero/empty placeholders required by the CacheMetadata schema.

    Args:
        response_id: Response ID to track.

    Returns:
        A new CacheMetadata instance whose ``cache_name`` holds ``response_id``.
    """
    # adk >= 1.17 added a required `contents_count` field; probe the pydantic
    # model_fields so both schema generations are supported.
    if "contents_count" in CacheMetadata.model_fields:  # adk >= 1.17
        cache_metadata = CacheMetadata(
            cache_name=response_id,
            expire_time=0,
            fingerprint="",
            invocations_used=0,
            contents_count=0,
        )
    else:  # 1.15 <= adk < 1.17
        cache_metadata = CacheMetadata(
            cache_name=response_id,
            expire_time=0,
            fingerprint="",
            invocations_used=0,
        )
    return cache_metadata
172169
173170
174- def update_cache_metadata (
175- cache_metadata : CacheMetadata ,
176- agent_name : str ,
177- response_id : str ,
178- ) -> CacheMetadata :
179- """Update cache metadata by creating a new instance with updated cache_name.
180-
181- Since CacheMetadata is frozen, we cannot modify it directly. Instead,
182- we create a new instance with the updated cache_name field.
183- """
184- try :
185- agent_response_id = json .loads (cache_metadata .cache_name )
186- agent_response_id [agent_name ] = response_id
187- updated_cache_name = agent_response_id
188-
189- # Create a new CacheMetadata instance with updated cache_name
190- return build_cache_metadata (updated_cache_name )
191- except json .JSONDecodeError as e :
192- logger .warning (
193- f"Failed to update cache metadata. The cache_name is not a valid JSON string., { str (e )} "
194- )
195- return cache_metadata
196-
197-
198- def get_previous_response_id (
199- cache_metadata : CacheMetadata ,
200- agent_name : str ,
201- ):
202- try :
203- agent_response_id = json .loads (cache_metadata .cache_name )
204- return agent_response_id .get (agent_name , None )
205- except json .JSONDecodeError as e :
206- logger .warning (
207- f"Failed to get previous response id. The cache_name is not a valid JSON string., { str (e )} "
208- )
209- return None
210-
211-
212171class CompletionToResponsesAPIHandler :
213172 def __init__ (self ):
214173 self .litellm_handler = LiteLLMResponsesTransformationHandler ()
@@ -316,7 +275,6 @@ def openai_response_to_generate_content_response(
316275 llm_response = _model_response_to_generate_content_response (model_response )
317276
318277 llm_response = self .adapt_responses_api (
319- llm_request ,
320278 model_response ,
321279 llm_response ,
322280 )
@@ -325,7 +283,6 @@ def openai_response_to_generate_content_response(
325283
326284 def adapt_responses_api (
327285 self ,
328- llm_request : LlmRequest ,
329286 model_response : ModelResponse ,
330287 llm_response : LlmResponse ,
331288 stream : bool = False ,
@@ -334,21 +291,10 @@ def adapt_responses_api(
334291 Adapt responses api.
335292 """
336293 if not model_response .id .startswith ("chatcmpl" ):
337- # if llm_response.custom_metadata is None:
338- # llm_response.custom_metadata = {}
339- # llm_response.custom_metadata["response_id"] = model_response["id"]
340294 previous_response_id = model_response ["id" ]
341- if not llm_request .cache_metadata :
342- llm_response .cache_metadata = build_cache_metadata (
343- {llm_request .config .labels ["adk_agent_name" ]: previous_response_id }
344- )
345- else :
346- llm_response .cache_metadata = update_cache_metadata (
347- llm_request .cache_metadata ,
348- llm_request .config .labels ["adk_agent_name" ],
349- previous_response_id ,
350- )
351-
295+ llm_response .cache_metadata = build_cache_metadata (
296+ previous_response_id ,
297+ )
352298 # add responses cache data
353299 if not stream :
354300 if model_response .get ("usage" , {}).get ("prompt_tokens_details" ):
0 commit comments