We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent fea33c9 · commit c36ab15 (copy full SHA for c36ab15)
llama_cpp/llama.py
@@ -408,8 +408,8 @@ def __init__(
408
except:
409
bos_token_id = self.token_bos()
410
411
- eos_token = self.detokenize([eos_token_id]).decode("utf-8")
412
- bos_token = self.detokenize([bos_token_id]).decode("utf-8")
+ eos_token = self._model.token_get_text(eos_token_id)
+ bos_token = self._model.token_get_text(bos_token_id)
413
414
if self.verbose:
415
print(f"Using chat template: {template}", file=sys.stderr)
0 commit comments