@@ -1158,7 +1158,7 @@ def custom_display_fn(text, start_time):
             display_error(f"Error in chat completion: {e}")
             return None

-    def _execute_callback_and_display(self, prompt: str, response: str, generation_time: float):
+    def _execute_callback_and_display(self, prompt: str, response: str, generation_time: float, task_name=None, task_description=None, task_id=None):
         """Helper method to execute callbacks and display interaction.

         This centralizes the logic for callback execution and display to avoid duplication.
@@ -1174,9 +1174,9 @@ def _execute_callback_and_display(self, prompt: str, response: str, generation_t
                 agent_name=self.name,
                 agent_role=self.role,
                 agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
-                task_name=None,  # Not available in this context
-                task_description=None,  # Not available in this context
-                task_id=None  # Not available in this context
+                task_name=task_name,
+                task_description=task_description,
+                task_id=task_id
             )
         # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
         if self.verbose and not self._using_custom_llm:
@@ -1185,9 +1185,9 @@ def _execute_callback_and_display(self, prompt: str, response: str, generation_t
                 agent_name=self.name,
                 agent_role=self.role,
                 agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
-                task_name=None,  # Not available in this context
-                task_description=None,  # Not available in this context
-                task_id=None)  # Not available in this context
+                task_name=task_name,
+                task_description=task_description,
+                task_id=task_id)

     def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True, task_name=None, task_description=None, task_id=None):
         # Log all parameter values when in debug mode
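With this change, the task metadata reaches whatever interaction callback the application has registered instead of arriving as `None`. A minimal, hypothetical sketch of such a callback (only the keyword arguments visible in this diff are spelled out; the function name and the registration mechanism are assumptions, not part of this commit):

```python
# Hypothetical interaction callback; the name and how it gets registered are assumed.
def on_interaction(agent_name=None, agent_role=None, agent_tools=None,
                   task_name=None, task_description=None, task_id=None, **kwargs):
    # Before this commit task_name/task_description/task_id always arrived as None;
    # now they carry whatever the caller passed into chat()/achat().
    print(f"[{agent_name}] ran task {task_name!r} (id={task_id})")
```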
@@ -1374,7 +1374,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
                 try:
                     validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
                     # Execute callback after validation
-                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed for JSON output: {e}")
@@ -1393,7 +1393,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
                 try:
                     validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools)
                     # Execute callback after validation
-                    self._execute_callback_and_display(original_prompt, validated_reasoning, time.time() - start_time)
+                    self._execute_callback_and_display(original_prompt, validated_reasoning, time.time() - start_time, task_name, task_description, task_id)
                     return validated_reasoning
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed for reasoning content: {e}")
@@ -1404,7 +1404,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
                 try:
                     validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
                     # Execute callback after validation
-                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed: {e}")
@@ -1472,7 +1472,7 @@ def __init__(self, data):
                 try:
                     validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
                     # Execute callback after validation
-                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed after reflection: {e}")
@@ -1490,7 +1490,7 @@ def __init__(self, data):
                 try:
                     validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
                     # Execute callback after validation
-                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed after max reflections: {e}")
@@ -1536,7 +1536,7 @@ def clean_json_output(self, output: str) -> str:
             cleaned = cleaned[:-3].strip()
         return cleaned

-    async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
+    async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, task_name=None, task_description=None, task_id=None):
         """Async version of chat method with self-reflection support."""
         # Log all parameter values when in debug mode
         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
@@ -1621,6 +1621,8 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
                 # Apply guardrail validation for custom LLM response
                 try:
                     validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                    # Execute callback after validation
+                    self._execute_callback_and_display(normalized_content, validated_response, time.time() - start_time, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed for custom LLM: {e}")
@@ -1805,6 +1807,8 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
             # Apply guardrail validation for OpenAI client response
             try:
                 validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                # Execute callback after validation
+                self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                 return validated_response
             except Exception as e:
                 logging.error(f"Agent {self.name}: Guardrail validation failed for OpenAI client: {e}")
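For illustration, a minimal async usage sketch of the extended `achat` signature. The import path, constructor arguments, and the task values are assumptions for the example, not part of this commit:

```python
import asyncio

from praisonaiagents import Agent  # assumed import path

async def main():
    agent = Agent(name="Reporter", role="Analyst")  # assumed constructor arguments
    # achat now accepts the same task metadata as chat(), so async callers
    # can forward it to callbacks and the display output.
    result = await agent.achat(
        "Summarize the latest findings",
        task_name="summarize",  # example values only
        task_description="Summarize findings for the weekly report",
        task_id="task-001",
    )
    print(result)

asyncio.run(main())
```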