Skip to content

Commit b200df9

Browse files
fix: correct indentation error in async streaming code
Fixed an IndentationError at line 1325, where the `async for` loop was not properly indented under the `if verbose:` block. Also corrected the indentation of the corresponding `else` block that handles non-verbose streaming. Co-authored-by: Mervin Praison <[email protected]>
1 parent 6acf678 commit b200df9

File tree

1 file changed

+35
-35
lines changed
  • src/praisonai-agents/praisonaiagents/llm

1 file changed

+35
-35
lines changed

src/praisonai-agents/praisonaiagents/llm/llm.py

Lines changed: 35 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -1322,43 +1322,43 @@ async def get_response_async(
13221322
tool_calls = []
13231323

13241324
if verbose:
1325-
async for chunk in await litellm.acompletion(
1326-
**self._build_completion_params(
1327-
messages=messages,
1328-
temperature=temperature,
1329-
stream=True,
1330-
tools=formatted_tools,
1331-
**kwargs
1332-
)
1333-
):
1334-
if chunk and chunk.choices and chunk.choices[0].delta:
1335-
delta = chunk.choices[0].delta
1336-
response_text, tool_calls = self._process_stream_delta(
1337-
delta, response_text, tool_calls, formatted_tools
1325+
async for chunk in await litellm.acompletion(
1326+
**self._build_completion_params(
1327+
messages=messages,
1328+
temperature=temperature,
1329+
stream=True,
1330+
tools=formatted_tools,
1331+
**kwargs
13381332
)
1339-
if delta.content:
1340-
print("\033[K", end="\r")
1341-
print(f"Generating... {time.time() - start_time:.1f}s", end="\r")
1333+
):
1334+
if chunk and chunk.choices and chunk.choices[0].delta:
1335+
delta = chunk.choices[0].delta
1336+
response_text, tool_calls = self._process_stream_delta(
1337+
delta, response_text, tool_calls, formatted_tools
1338+
)
1339+
if delta.content:
1340+
print("\033[K", end="\r")
1341+
print(f"Generating... {time.time() - start_time:.1f}s", end="\r")
13421342

1343-
else:
1344-
# Non-verbose streaming
1345-
async for chunk in await litellm.acompletion(
1346-
**self._build_completion_params(
1347-
messages=messages,
1348-
temperature=temperature,
1349-
stream=True,
1350-
tools=formatted_tools,
1351-
**kwargs
1352-
)
1353-
):
1354-
if chunk and chunk.choices and chunk.choices[0].delta:
1355-
delta = chunk.choices[0].delta
1356-
if delta.content:
1357-
response_text += delta.content
1358-
1359-
# Capture tool calls from streaming chunks if provider supports it
1360-
if formatted_tools and self._supports_streaming_tools():
1361-
tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)
1343+
else:
1344+
# Non-verbose streaming
1345+
async for chunk in await litellm.acompletion(
1346+
**self._build_completion_params(
1347+
messages=messages,
1348+
temperature=temperature,
1349+
stream=True,
1350+
tools=formatted_tools,
1351+
**kwargs
1352+
)
1353+
):
1354+
if chunk and chunk.choices and chunk.choices[0].delta:
1355+
delta = chunk.choices[0].delta
1356+
if delta.content:
1357+
response_text += delta.content
1358+
1359+
# Capture tool calls from streaming chunks if provider supports it
1360+
if formatted_tools and self._supports_streaming_tools():
1361+
tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)
13621362

13631363
response_text = response_text.strip()
13641364

0 commit comments

Comments
 (0)