
Commit 8150768

Merge pull request #962 from MervinPraison/claude/issue-956-20250716-2316

fix: enable real-time streaming regardless of verbose setting

2 parents: a7a664e + fea00ad

2 files changed: +10 −23 lines

src/praisonai-agents/praisonaiagents/agent/agent.py
2 additions, 7 deletions

```diff
@@ -1128,11 +1128,6 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, r
             )
         else:
             # Use the standard OpenAI client approach with tool support
-            def custom_display_fn(text, start_time):
-                if self.verbose:
-                    return display_generating(text, start_time)
-                return ""
-
             # Note: openai_client expects tools in various formats and will format them internally
             # But since we already have formatted_tools, we can pass them directly
             if self._openai_client is None:
@@ -1145,8 +1140,8 @@ def custom_display_fn(text, start_time):
                 tools=formatted_tools,  # Already formatted for OpenAI
                 execute_tool_fn=self.execute_tool,
                 stream=stream,
-                console=self.console if self.verbose else None,
-                display_fn=display_generating if stream and self.verbose else None,
+                console=self.console if (self.verbose or stream) else None,
+                display_fn=display_generating if stream else None,
                 reasoning_steps=reasoning_steps,
                 verbose=self.verbose,
                 max_iterations=10
```
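The agent.py change is a pure condition swap: `console` used to require `self.verbose`, and `display_fn` required both `stream` and `self.verbose`, so a non-verbose agent silently lost live output even with streaming enabled. Below is a minimal, self-contained sketch of the new gating; `resolve_display_args` is a hypothetical helper for illustration only, and the `Console` instance and the lambda stand in for the real `self.console` and `display_generating`:

```python
# Hypothetical helper illustrating the argument gating after this commit.
from rich.console import Console

def resolve_display_args(console, verbose, stream):
    return {
        # Old: console only when verbose. New: verbose OR stream enables it.
        "console": console if (verbose or stream) else None,
        # Old: display_fn needed stream AND verbose. New: stream alone suffices.
        "display_fn": (lambda text, start_time: f"Generating... {text}") if stream else None,
    }

# verbose=False no longer disables the live display when streaming:
args = resolve_display_args(Console(), verbose=False, stream=True)
assert args["console"] is not None and args["display_fn"] is not None
```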

src/praisonai-agents/praisonaiagents/llm/llm.py
8 additions, 16 deletions

```diff
@@ -2468,18 +2468,14 @@ def response(
             )
 
             if stream:
-                if verbose:
-                    with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
-                        for chunk in litellm.completion(**completion_params):
-                            content = self._process_streaming_chunk(chunk)
-                            if content:
-                                response_text += content
-                                live.update(display_generating(response_text, start_time))
-                else:
+                with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
                     for chunk in litellm.completion(**completion_params):
                         content = self._process_streaming_chunk(chunk)
                         if content:
                             response_text += content
+                            live.update(display_generating(response_text, start_time))
+                        if content:
+                            response_text += content
             else:
                 response = litellm.completion(**completion_params)
                 response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""
```
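This hunk drops the `if verbose:` gate around the `Live` display, so any streaming call now renders incrementally. The core `Live`-update pattern, reduced to a runnable sketch: the string chunks stand in for the deltas yielded by `litellm.completion(..., stream=True)`, and a plain f-string stands in for `display_generating`.

```python
# Self-contained sketch of the Live-update loop; not the actual llm.py code.
import time
from rich.console import Console
from rich.live import Live

console = Console()
response_text = ""
fake_chunks = ["Streaming ", "now ", "works ", "without ", "verbose."]  # simulated deltas

with Live("", console=console, refresh_per_second=4) as live:
    for content in fake_chunks:  # stands in for litellm.completion(**completion_params)
        if content:
            response_text += content
            live.update(f"Generating... {response_text}")
        time.sleep(0.2)  # simulate network latency between chunks

print(response_text)
```

`refresh_per_second=4` caps the redraw rate, so the terminal updates smoothly even when chunks arrive faster than the display can usefully repaint.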
```diff
@@ -2568,18 +2564,14 @@ async def aresponse(
             )
 
             if stream:
-                if verbose:
-                    with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
-                        async for chunk in await litellm.acompletion(**completion_params):
-                            content = self._process_streaming_chunk(chunk)
-                            if content:
-                                response_text += content
-                                live.update(display_generating(response_text, start_time))
-                else:
+                with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
                     async for chunk in await litellm.acompletion(**completion_params):
                         content = self._process_streaming_chunk(chunk)
                         if content:
                             response_text += content
+                            live.update(display_generating(response_text, start_time))
+                        if content:
+                            response_text += content
             else:
                 response = await litellm.acompletion(**completion_params)
                 response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""
```
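The async hunk mirrors the sync one: `aresponse()` consumes `await litellm.acompletion(..., stream=True)` with `async for` inside the same `Live` context. A sketch under the same assumptions, with a hypothetical `fake_acompletion` async generator standing in for the LiteLLM stream:

```python
# Hypothetical async counterpart; fake_acompletion stands in for litellm.acompletion.
import asyncio
from rich.console import Console
from rich.live import Live

async def fake_acompletion():
    for piece in ["Async ", "streaming ", "is ", "always ", "live."]:
        await asyncio.sleep(0.2)  # simulate network latency
        yield piece

async def main():
    console = Console()
    response_text = ""
    with Live("", console=console, refresh_per_second=4) as live:
        async for content in fake_acompletion():
            if content:
                response_text += content
                live.update(f"Generating... {response_text}")
    print(response_text)

asyncio.run(main())
```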
