Skip to content

Commit 6e5d100

Browse files
committed
fix: add add_generation_prompt=True, without which the LLM does not know when to start a response
1 parent 66243d1 commit 6e5d100

File tree

1 file changed

+1
-0
lines changed

1 file changed

+1
-0
lines changed

mellea/backends/huggingface.py

Lines changed: 1 addition & 0 deletions
@@ -342,6 +342,7 @@ def _generate_from_context_standard(
             input_ids = self._tokenizer.apply_chat_template(  # type: ignore
                 ctx_as_conversation,
                 tools=convert_tools_to_json(tools),  # type: ignore
+                add_generation_prompt=True,
                 return_tensors="pt",
                 **self._make_backend_specific_and_remove(model_options),
             ).to(self._device)  # type: ignore

0 commit comments

Comments
 (0)