We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent e98dd36 · commit 24bd77c — Copy full SHA for 24bd77c
llama_cpp/llama_chat_template.py
@@ -486,6 +486,8 @@ def _handle_streaming_tool_calls(
486
):
487
param_text = param_chunk["choices"][0]["text"]
488
# Convert to chat completion chunk and yield
489
+ print(f"param_text: {param_text}")
490
+ accumulated_text += param_text
491
yield {
492
"id": "chat" + name_completion["id"],
493
"object": "chat.completion.chunk",
@@ -509,7 +511,6 @@ def _handle_streaming_tool_calls(
509
511
"finish_reason": None
510
512
}]
513
}
- accumulated_text += param_text
514
515
# After completing the tool call parameters, continue with more completions
516
# Recursively handle the next completion by starting a new generation
0 commit comments