Skip to content

Commit 1ae6748

Browse files
fix: ensure consistent Task/Response formatting across all LLM providers
- Modified display condition to show formatted output for both OpenAI and custom LLM providers (like Gemini)
- Added `_final_display_shown` flag to prevent duplicate displays within the same execution
- Reset the flag at the start of each `chat()` call for new conversations
- Maintains backward compatibility and existing verbose control behavior

Fixes #958

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-authored-by: Mervin Praison <[email protected]>
1 parent 27972b6 commit 1ae6748

File tree

1 file changed

+9
-2
lines changed
  • src/praisonai-agents/praisonaiagents/agent

1 file changed

+9
-2
lines changed

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 9 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -354,6 +354,8 @@ def __init__(
 354  354          self.instructions = instructions
 355  355          # Check for model name in environment variable if not provided
 356  356          self._using_custom_llm = False
      357+        # Flag to track if final result has been displayed to prevent duplicates
      358+        self._final_display_shown = False
 357  359
 358  360          # Store OpenAI client parameters for lazy initialization
 359  361          self._openai_api_key = api_key
@@ -1178,8 +1180,9 @@ def _execute_callback_and_display(self, prompt: str, response: str, generation_t
 1178  1180            task_description=None, # Not available in this context
 1179  1181            task_id=None # Not available in this context
 1180  1182        )
 1181-             # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
 1182-             if self.verbose and not self._using_custom_llm:
       1183+       # Always display final interaction when verbose is True to ensure consistent formatting
       1184+       # This ensures both OpenAI and custom LLM providers (like Gemini) show formatted output
       1185+       if self.verbose and not self._final_display_shown:
 1183  1186            display_interaction(prompt, response, markdown=self.markdown,
 1184  1187                                generation_time=generation_time, console=self.console,
 1185  1188                                agent_name=self.name,
@@ -1188,8 +1191,12 @@ def _execute_callback_and_display(self, prompt: str, response: str, generation_t
 1188  1191                            task_name=None, # Not available in this context
 1189  1192                            task_description=None, # Not available in this context
 1190  1193                            task_id=None) # Not available in this context
       1194+           self._final_display_shown = True
 1191  1195
 1192  1196    def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True, task_name=None, task_description=None, task_id=None):
       1197+       # Reset the final display flag for each new conversation
       1198+       self._final_display_shown = False
       1199+
 1193  1200        # Log all parameter values when in debug mode
 1194  1201        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
 1195  1202            param_info = {

0 commit comments

Comments
 (0)