@@ -653,7 +653,7 @@ def _process_guardrail(self, task_output):
                 error=f"Agent guardrail validation error: {str(e)}"
             )

-    def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, tools=None):
+    def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, tools=None, task_name=None, task_description=None, task_id=None):
         """Apply guardrail validation with retry logic.

         Args:
@@ -707,7 +707,7 @@ def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, to
             # Regenerate response for retry
             try:
                 retry_prompt = f"{prompt}\n\nNote: Previous response failed validation due to: {guardrail_result.error}. Please provide an improved response."
-                response = self._chat_completion([{"role": "user", "content": retry_prompt}], temperature, tools)
+                response = self._chat_completion([{"role": "user", "content": retry_prompt}], temperature, tools, task_name=task_name, task_description=task_description, task_id=task_id)
                 if response and response.choices:
                     current_response = response.choices[0].message.content.strip()
                 else:
@@ -1072,7 +1072,7 @@ def _process_stream_response(self, messages, temperature, start_time, formatted_
             reasoning_steps=reasoning_steps
         )

-    def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False):
+    def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False, task_name=None, task_description=None, task_id=None):
         start_time = time.time()
         logging.debug(f"{self.name} sending messages to LLM: {messages}")

@@ -1158,7 +1158,7 @@ def custom_display_fn(text, start_time):
             display_error(f"Error in chat completion: {e}")
             return None

-    def _execute_callback_and_display(self, prompt: str, response: str, generation_time: float):
+    def _execute_callback_and_display(self, prompt: str, response: str, generation_time: float, task_name=None, task_description=None, task_id=None):
         """Helper method to execute callbacks and display interaction.

         This centralizes the logic for callback execution and display to avoid duplication.
@@ -1174,9 +1174,9 @@ def _execute_callback_and_display(self, prompt: str, response: str, generation_t
             agent_name=self.name,
             agent_role=self.role,
             agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
-            task_name=None,  # Not available in this context
-            task_description=None,  # Not available in this context
-            task_id=None  # Not available in this context
+            task_name=task_name,
+            task_description=task_description,
+            task_id=task_id
         )
         # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
         if self.verbose and not self._using_custom_llm:
@@ -1185,9 +1185,9 @@ def _execute_callback_and_display(self, prompt: str, response: str, generation_t
                 agent_name=self.name,
                 agent_role=self.role,
                 agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
-                task_name=None,  # Not available in this context
-                task_description=None,  # Not available in this context
-                task_id=None)  # Not available in this context
+                task_name=task_name,
+                task_description=task_description,
+                task_id=task_id)

     def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True, task_name=None, task_description=None, task_id=None):
         # Log all parameter values when in debug mode
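For orientation, a minimal usage sketch (not part of this diff) of how a caller might pass the task metadata that the updated `chat()` signature now forwards to `_chat_completion`, `_apply_guardrail_with_retry`, and `_execute_callback_and_display`. The `Agent` constructor arguments and the literal values below are illustrative assumptions, not taken from this change:

    from praisonaiagents import Agent

    # Illustrative agent setup; constructor arguments are assumptions.
    agent = Agent(name="Researcher", role="Research Assistant", instructions="Answer concisely.")

    # task_name, task_description and task_id are now threaded through the internal
    # completion, guardrail-retry and callback/display paths instead of being dropped.
    result = agent.chat(
        "Summarize the key findings",
        task_name="weekly-summary",        # hypothetical values for illustration
        task_description="Summarize findings for the weekly report",
        task_id="task-001",
    )
    print(result)
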
@@ -1297,7 +1297,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd

                 # Apply guardrail validation for custom LLM response
                 try:
-                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed for custom LLM: {e}")
@@ -1357,7 +1357,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
                     agent_tools=agent_tools
                 )

-            response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=self.stream)
+            response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=self.stream, task_name=task_name, task_description=task_description, task_id=task_id)
             if not response:
                 # Rollback chat history on response failure
                 self.chat_history = self.chat_history[:chat_history_length]
@@ -1372,9 +1372,9 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
                 self.chat_history.append({"role": "assistant", "content": response_text})
                 # Apply guardrail validation even for JSON output
                 try:
-                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                     # Execute callback after validation
-                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed for JSON output: {e}")
@@ -1391,9 +1391,9 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
                 if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
                     # Apply guardrail to reasoning content
                     try:
-                        validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools)
+                        validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools, task_name, task_description, task_id)
                         # Execute callback after validation
-                        self._execute_callback_and_display(original_prompt, validated_reasoning, time.time() - start_time)
+                        self._execute_callback_and_display(original_prompt, validated_reasoning, time.time() - start_time, task_name, task_description, task_id)
                         return validated_reasoning
                     except Exception as e:
                         logging.error(f"Agent {self.name}: Guardrail validation failed for reasoning content: {e}")
@@ -1402,9 +1402,9 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
                         return None
                 # Apply guardrail to regular response
                 try:
-                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                     # Execute callback after validation
-                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed: {e}")
@@ -1426,7 +1426,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
                     if self._using_custom_llm or self._openai_client is None:
                         # For custom LLMs, we need to handle reflection differently
                         # Use non-streaming to get complete JSON response
-                        reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False)
+                        reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False, task_name=task_name, task_description=task_description, task_id=task_id)

                         if not reflection_response or not reflection_response.choices:
                             raise Exception("No response from reflection request")
@@ -1470,9 +1470,9 @@ def __init__(self, data):
                         self.chat_history.append({"role": "assistant", "content": response_text})
                         # Apply guardrail validation after satisfactory reflection
                         try:
-                            validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                            validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                             # Execute callback after validation
-                            self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                            self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                             return validated_response
                         except Exception as e:
                             logging.error(f"Agent {self.name}: Guardrail validation failed after reflection: {e}")
@@ -1488,9 +1488,9 @@ def __init__(self, data):
                         self.chat_history.append({"role": "assistant", "content": response_text})
                         # Apply guardrail validation after max reflections
                         try:
-                            validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                            validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                             # Execute callback after validation
-                            self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                            self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                             return validated_response
                         except Exception as e:
                             logging.error(f"Agent {self.name}: Guardrail validation failed after max reflections: {e}")
@@ -1503,7 +1503,7 @@ def __init__(self, data):
                     messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
                     # For custom LLMs during reflection, always use non-streaming to ensure complete responses
                     use_stream = self.stream if not self._using_custom_llm else False
-                    response = self._chat_completion(messages, temperature=temperature, tools=None, stream=use_stream)
+                    response = self._chat_completion(messages, temperature=temperature, tools=None, stream=use_stream, task_name=task_name, task_description=task_description, task_id=task_id)
                     response_text = response.choices[0].message.content.strip()
                     reflection_count += 1
                     continue  # Continue the loop for more reflections
@@ -1620,7 +1620,9 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None

                 # Apply guardrail validation for custom LLM response
                 try:
-                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools, task_name, task_description, task_id)
+                    # Execute callback after validation
+                    self._execute_callback_and_display(normalized_content, validated_response, time.time() - start_time, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed for custom LLM: {e}")
@@ -1697,6 +1699,8 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
                     if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
                         total_time = time.time() - start_time
                         logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
+                    # Execute callback after tool completion
+                    self._execute_callback_and_display(original_prompt, result, time.time() - start_time, task_name, task_description, task_id)
                     return result
             elif output_json or output_pydantic:
                 response = await self._openai_client.async_client.chat.completions.create(
@@ -1705,11 +1709,13 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
                     temperature=temperature,
                     response_format={"type": "json_object"}
                 )
-                # Return the raw response
+                response_text = response.choices[0].message.content
                 if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
                     total_time = time.time() - start_time
                     logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
-                return response.choices[0].message.content
+                # Execute callback after JSON/Pydantic completion
+                self._execute_callback_and_display(original_prompt, response_text, time.time() - start_time, task_name, task_description, task_id)
+                return response_text
             else:
                 response = await self._openai_client.async_client.chat.completions.create(
                     model=self.llm,
@@ -1804,7 +1810,9 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None

                 # Apply guardrail validation for OpenAI client response
                 try:
-                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
+                    # Execute callback after validation
+                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed for OpenAI client: {e}")
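Taken together, these changes mean the task metadata should now reach interaction callbacks in both the sync and async paths, including the guardrail, JSON, and tool branches of `achat`. A hedged sketch of observing that metadata, assuming `register_display_callback` is importable from `praisonaiagents` and that the "interaction" callback receives the fields shown as keyword arguments (both assumptions, not confirmed by this diff):

    import asyncio
    from praisonaiagents import Agent, register_display_callback  # import path assumed

    # Accept **kwargs so the callback tolerates whatever fields the display layer passes;
    # after this change, task_name / task_description / task_id should no longer arrive as None.
    def log_interaction(message=None, response=None, **kwargs):
        print("task_name:", kwargs.get("task_name"))
        print("task_id:", kwargs.get("task_id"))

    register_display_callback("interaction", log_interaction)  # callback type name assumed

    agent = Agent(name="Writer", role="Writer", instructions="Write a haiku.")  # illustrative setup

    async def main():
        # achat now forwards the metadata to the callback/display layer as well.
        await agent.achat("Write a haiku about code review", task_name="haiku-task", task_id="task-002")

    asyncio.run(main())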