Skip to content

Commit f6e8368

Browse files
committed
debug
1 parent ab78275 commit f6e8368

File tree

1 file changed: 1 addition, 4 deletions

llama_cpp/llama_chat_template.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -228,9 +228,7 @@ def raise_exception(message: str):
             strftime_now=self.strftime_now,
         )

-        print("JINJA2 PROMPT")
         print(prompt)
-        print("JINJA2 PROMPT END")

         stopping_criteria = None
         if self.stop_token_ids is not None:
@@ -437,6 +435,7 @@ def _handle_streaming_tool_calls(
        name_completion = llama.create_completion(
            prompt=combined_prompt,
            grammar=name_grammar,
+           temperature=0,
            stream=False,
            stop=[],  # Grammar will handle the format including colon
            **{k: v for k, v in base_completion_kwargs.items() if k != "stream" and k != "grammar"}
@@ -893,7 +892,6 @@ def chat_completion_handler(
        }

        # Format the prompt using the chat formatter
-       print("FORMATTING PROMPT")
        result = chat_formatter(
            messages=messages,
            functions=functions,
@@ -903,7 +901,6 @@ def chat_completion_handler(
        )

        # Prepare prompt and stopping criteria
-       print("TOKENIZING PROMPT")
        prompt = llama.tokenize(
            result.prompt.encode("utf-8"),
            add_bos=not result.added_special,

0 commit comments

Comments (0)