@@ -219,6 +219,31 @@ def before_model_modifier(
219
219
return None
220
220
221
221
222
# --- Define the Callback Function ---
def simple_after_model_modifier(
    callback_context: CallbackContext, llm_response: LlmResponse
) -> Optional[LlmResponse]:
    """Stop the consecutive tool calling of the agent.

    After-model callback: once "RecipeAgent" has produced a final text
    response, hard-stop the whole invocation so the agent does not keep
    issuing follow-up tool calls.

    Args:
        callback_context: Context for the current agent invocation; used to
            read the agent name and to end the invocation.
        llm_response: The response just returned by the model.

    Returns:
        Always ``None`` — the response content itself is never modified.
    """
    agent_name = callback_context.agent_name
    # --- Inspection ---
    if agent_name == "RecipeAgent":
        if llm_response.content and llm_response.content.parts:
            # Assuming a simple text response for this example: only a
            # model-role response whose first part carries text triggers the stop.
            if llm_response.content.role == 'model' and llm_response.content.parts[0].text:
                # NOTE(review): reaches into the private `_invocation_context`;
                # confirm there is no public API on CallbackContext to end an
                # invocation before relying on this.
                callback_context._invocation_context.end_invocation = True
                # Fixed: was an f-string with no placeholders and a stray
                # trailing apostrophe in the message.
                print("-----hard stopping the agent execution")
        elif llm_response.error_message:
            print(f"[Callback] Inspected response: Contains error '{llm_response.error_message}'. No modification.")
            return None
        else:
            print("[Callback] Inspected response: Empty LlmResponse.")
            return None  # Nothing to modify
    return None
222
247
shared_state_agent = LlmAgent (
223
248
name = "RecipeAgent" ,
224
249
model = "gemini-2.5-pro" ,
@@ -243,6 +268,7 @@ def before_model_modifier(
243
268
""" ,
244
269
tools = [generate_recipe ],
245
270
before_agent_callback = on_before_agent ,
246
- before_model_callback = before_model_modifier
271
+ before_model_callback = before_model_modifier ,
272
+ after_model_callback = simple_after_model_modifier
247
273
)
248
274
0 commit comments