Skip to content

Commit 3fa963f

Browse files
committed
Apply comments
1 parent 3326484 commit 3fa963f

File tree

1 file changed: +9 −15 lines

convert_hf_to_gguf.py

Lines changed: 9 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,6 @@ class ModelBase:
     lazy: bool
     part_names: list[str]
     is_safetensors: bool
-    is_mistral_format: bool
     hparams: dict[str, Any]
     tensor_names: set[str] | None
     gguf_writer: gguf.GGUFWriter
@@ -91,8 +90,7 @@ def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path,
                  use_temp_file: bool = False, eager: bool = False,
                  metadata_override: Path | None = None, model_name: str | None = None,
                  split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False,
-                 small_first_shard: bool = False, hparams: dict[str, Any] | None = None,
-                 remote_hf_model_id: str | None = None):
+                 small_first_shard: bool = False, hparams: dict[str, Any] | None = None, remote_hf_model_id: str | None = None):
         if type(self) is ModelBase or \
                 type(self) is TextModel or \
                 type(self) is MmprojModel:
@@ -106,7 +104,6 @@ def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path,
         self.use_temp_file = use_temp_file
         self.lazy = not eager or (remote_hf_model_id is not None)
         self.remote_hf_model_id = remote_hf_model_id
-
         if remote_hf_model_id is not None:
             self.is_safetensors = True

@@ -2001,7 +1998,13 @@ def set_vocab(self):
         path_tekken_json = self.dir_model / "tekken.json"
         path_tokenizer_json = self.dir_model / "tokenizer.json"
         if path_tekken_json.is_file() and not path_tokenizer_json.is_file():
-            return self.set_vocab_tekken()
+            self._set_vocab_mistral()
+
+            script_dir = Path(__file__).parent
+            template_path = script_dir / "models/templates/unsloth-mistral-Devstral-Small-2507.jinja"
+            with open(template_path, "r", encoding="utf-8") as f:
+                template = f.read()
+            self.gguf_writer.add_chat_template(template)

         try:
             self._set_vocab_sentencepiece()
@@ -2033,16 +2036,7 @@ def set_vocab(self):

         # Apply to granite small models only
         if self.hparams.get("vocab_size", 32000) == 49152:
-            self.gguf_writer.add_add_bos_token(False)
-
-    def set_vocab_tekken(self):
-        self._set_vocab_mistral()
-
-        script_dir = Path(__file__).parent
-        template_path = script_dir / "models/templates/unsloth-mistral-Devstral-Small-2507.jinja"
-        with open(template_path, "r", encoding="utf-8") as f:
-            template = f.read()
-        self.gguf_writer.add_chat_template(template)
+            self.gguf_writer.add_add_bos_token(False)

     def set_gguf_parameters(self):
         super().set_gguf_parameters()

Comments: 0