Skip to content

Commit 77715c8

Browse files
committed
fix: add add_generation_prompt=True to the chat-template call; without it the LLM does not know when to start a response
1 parent 6a7ab9e commit 77715c8

File tree

1 file changed

+1
-0
lines changed

1 file changed

+1
-0
lines changed

mellea/backends/huggingface.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -332,6 +332,7 @@ def _generate_from_context_standard(
332332
input_ids = self._tokenizer.apply_chat_template( # type: ignore
333333
ctx_as_conversation,
334334
tools=convert_tools_to_json(tools), # type: ignore
335+
add_generation_prompt=True,
335336
return_tensors="pt",
336337
**self._make_backend_specific_and_remove(model_options),
337338
).to(self._device) # type: ignore

0 commit comments

Comments (0)