
Commit 6237dc5

Merge pull request #701 from MervinPraison/claude/issue-615-20250627_112613
fix: add stream parameter to disable streaming for custom LLMs
2 parents: 5884ee9 + 5743127

2 files changed: 25 additions, 19 deletions
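
In practice, the new parameter lets callers disable streaming when they construct an agent. A minimal usage sketch, assuming the praisonaiagents package; the instructions and model string below are placeholders, not from this commit:

    from praisonaiagents import Agent

    # stream=False asks the agent for one complete response instead of
    # token-by-token chunks -- useful for LLM providers that reject
    # streamed requests.
    agent = Agent(
        instructions="You are a helpful assistant.",   # placeholder
        llm="openai/my-custom-model",                  # placeholder provider/model
        stream=False,
    )
    agent.chat("Summarize this commit in one sentence.")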

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 8 additions & 4 deletions
@@ -364,6 +364,7 @@ def __init__(
         knowledge_config: Optional[Dict[str, Any]] = None,
         use_system_prompt: Optional[bool] = True,
         markdown: bool = True,
+        stream: bool = True,
         self_reflect: bool = False,
         max_reflect: int = 3,
         min_reflect: int = 1,
@@ -435,6 +436,8 @@ def __init__(
                 conversations to establish agent behavior and context. Defaults to True.
             markdown (bool, optional): Enable markdown formatting in agent responses for better
                 readability and structure. Defaults to True.
+            stream (bool, optional): Enable streaming responses from the language model. Set to False
+                for LLM providers that don't support streaming. Defaults to True.
             self_reflect (bool, optional): Enable self-reflection capabilities where the agent
                 evaluates and improves its own responses. Defaults to False.
             max_reflect (int, optional): Maximum number of self-reflection iterations to prevent
@@ -554,6 +557,7 @@ def __init__(
         self.use_system_prompt = use_system_prompt
         self.chat_history = []
         self.markdown = markdown
+        self.stream = stream
         self.max_reflect = max_reflect
         self.min_reflect = min_reflect
         self.reflect_prompt = reflect_prompt
@@ -1002,7 +1006,7 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, r
                     tools=formatted_tools if formatted_tools else None,
                     verbose=self.verbose,
                     markdown=self.markdown,
-                    stream=True,
+                    stream=stream,
                     console=self.console,
                     execute_tool_fn=self.execute_tool,
                     agent_name=self.name,
@@ -1018,7 +1022,7 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, r
                     tools=formatted_tools if formatted_tools else None,
                     verbose=self.verbose,
                     markdown=self.markdown,
-                    stream=False,
+                    stream=stream,
                     console=self.console,
                     execute_tool_fn=self.execute_tool,
                     agent_name=self.name,
@@ -1276,7 +1280,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
                 agent_tools=agent_tools
             )

-            response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=stream)
+            response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=self.stream)
             if not response:
                 return None

@@ -1371,7 +1375,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd

                     logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
                     messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
-                    response = self._chat_completion(messages, temperature=temperature, tools=None, stream=stream)
+                    response = self._chat_completion(messages, temperature=temperature, tools=None, stream=self.stream)
                     response_text = response.choices[0].message.content.strip()
                     reflection_count += 1
                     continue  # Continue the loop for more reflections
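
Note the behavioral change in chat(): both _chat_completion call sites now pass stream=self.stream, so the constructor-level flag takes precedence over the stream argument that chat() itself receives. A simplified sketch of that precedence, with hypothetical names rather than the actual class:

    class AgentSketch:
        def __init__(self, stream: bool = True):
            # The constructor-level flag is stored once...
            self.stream = stream

        def chat(self, prompt: str, stream: bool = True) -> str:
            # ...and wins over the per-call argument, mirroring the
            # stream=self.stream calls in the diff above.
            return self._chat_completion(prompt, stream=self.stream)

        def _chat_completion(self, prompt: str, stream: bool = True) -> str:
            return f"stream={stream}: {prompt}"

    print(AgentSketch(stream=False).chat("hi", stream=True))  # prints "stream=False: hi"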

src/praisonai-agents/praisonaiagents/llm/llm.py

Lines changed: 17 additions & 15 deletions
@@ -296,6 +296,7 @@ def get_response(
         agent_role: Optional[str] = None,
         agent_tools: Optional[List[str]] = None,
         execute_tool_fn: Optional[Callable] = None,
+        stream: bool = True,
         **kwargs
     ) -> str:
         """Enhanced get_response with all OpenAI-like features"""
@@ -487,7 +488,7 @@ def get_response(
                             messages=messages,
                             tools=formatted_tools,
                             temperature=temperature,
-                            stream=True,
+                            stream=stream,
                             **kwargs
                         )
                     ):
@@ -503,7 +504,7 @@ def get_response(
                             messages=messages,
                             tools=formatted_tools,
                             temperature=temperature,
-                            stream=True,
+                            stream=stream,
                             **kwargs
                         )
                     ):
@@ -655,7 +656,7 @@ def get_response(
                             **self._build_completion_params(
                                 messages=follow_up_messages,
                                 temperature=temperature,
-                                stream=True
+                                stream=stream
                             )
                         ):
                             if chunk and chunk.choices and chunk.choices[0].delta.content:
@@ -668,7 +669,7 @@ def get_response(
                             **self._build_completion_params(
                                 messages=follow_up_messages,
                                 temperature=temperature,
-                                stream=True
+                                stream=stream
                             )
                         ):
                             if chunk and chunk.choices and chunk.choices[0].delta.content:
@@ -755,7 +756,7 @@ def get_response(
                             messages=messages,
                             tools=formatted_tools,
                             temperature=temperature,
-                            stream=True,
+                            stream=stream,
                             **kwargs
                         )
                     ):
@@ -873,7 +874,7 @@ def get_response(
                         **self._build_completion_params(
                             messages=reflection_messages,
                             temperature=temperature,
-                            stream=True,
+                            stream=stream,
                             response_format={"type": "json_object"},
                             **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                         )
@@ -888,7 +889,7 @@ def get_response(
                         **self._build_completion_params(
                             messages=reflection_messages,
                             temperature=temperature,
-                            stream=True,
+                            stream=stream,
                             response_format={"type": "json_object"},
                             **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                         )
@@ -1004,6 +1005,7 @@ async def get_response_async(
         agent_role: Optional[str] = None,
         agent_tools: Optional[List[str]] = None,
         execute_tool_fn: Optional[Callable] = None,
+        stream: bool = True,
         **kwargs
     ) -> str:
         """Async version of get_response with identical functionality."""
@@ -1204,7 +1206,7 @@ async def get_response_async(
                     **self._build_completion_params(
                         messages=messages,
                         temperature=temperature,
-                        stream=True,
+                        stream=stream,
                         **kwargs
                     )
                 ):
@@ -1218,7 +1220,7 @@ async def get_response_async(
                     **self._build_completion_params(
                         messages=messages,
                         temperature=temperature,
-                        stream=True,
+                        stream=stream,
                         **kwargs
                     )
                 ):
@@ -1355,7 +1357,7 @@ async def get_response_async(
                         **self._build_completion_params(
                             messages=follow_up_messages,
                             temperature=temperature,
-                            stream=True
+                            stream=stream
                         )
                     ):
                         if chunk and chunk.choices and chunk.choices[0].delta.content:
@@ -1369,7 +1371,7 @@ async def get_response_async(
                         **self._build_completion_params(
                             messages=follow_up_messages,
                             temperature=temperature,
-                            stream=True
+                            stream=stream
                         )
                     ):
                         if chunk and chunk.choices and chunk.choices[0].delta.content:
@@ -1437,7 +1439,7 @@ async def get_response_async(
                     **self._build_completion_params(
                         messages=messages,
                         temperature=temperature,
-                        stream=True,
+                        stream=stream,
                         tools=formatted_tools,
                         **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     )
@@ -1453,7 +1455,7 @@ async def get_response_async(
                     **self._build_completion_params(
                         messages=messages,
                         temperature=temperature,
-                        stream=True,
+                        stream=stream,
                         **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     )
                 ):
@@ -1534,7 +1536,7 @@ async def get_response_async(
                         **self._build_completion_params(
                             messages=reflection_messages,
                             temperature=temperature,
-                            stream=True,
+                            stream=stream,
                             response_format={"type": "json_object"},
                             **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                         )
@@ -1549,7 +1551,7 @@ async def get_response_async(
                         **self._build_completion_params(
                             messages=reflection_messages,
                             temperature=temperature,
-                            stream=True,
+                            stream=stream,
                             response_format={"type": "json_object"},
                             **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                         )
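
Every hunk above follows the same pattern: a hard-coded stream=True passed to _build_completion_params becomes the caller-supplied flag. Since this class builds parameters for litellm-style completion calls, the effect is roughly the sketch below; the helper function is illustrative, not the library's actual code:

    import litellm

    def get_text(model: str, messages: list, stream: bool = True) -> str:
        """Collect the full response text, streaming only when the provider supports it."""
        if stream:
            # Streaming path: accumulate delta content chunk by chunk,
            # with the same empty-chunk guard used in the diff above.
            text = ""
            for chunk in litellm.completion(model=model, messages=messages, stream=True):
                if chunk and chunk.choices and chunk.choices[0].delta.content:
                    text += chunk.choices[0].delta.content
            return text
        # Non-streaming path for providers that reject stream=True.
        response = litellm.completion(model=model, messages=messages, stream=False)
        return response.choices[0].message.content

    # Example call (placeholder model name):
    # get_text("openai/my-custom-model", [{"role": "user", "content": "hi"}], stream=False)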
