Skip to content

Commit 6066a01

Browse files
committed
fix: add `add_generation_prompt=True`; without it, the LLM does not know when to start a response
1 parent a7da023 commit 6066a01

File tree

1 file changed

+1
-0
lines changed

1 file changed

+1
-0
lines changed

mellea/backends/huggingface.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -341,6 +341,7 @@ def _generate_from_context_standard(
341341
input_ids = self._tokenizer.apply_chat_template( # type: ignore
342342
ctx_as_conversation,
343343
tools=convert_tools_to_json(tools), # type: ignore
344+
add_generation_prompt=True,
344345
return_tensors="pt",
345346
**self._make_backend_specific_and_remove(model_options),
346347
).to(self._device) # type: ignore

0 commit comments

Comments (0)