
Commit e5fe089 (parent 2d56a29)

fix style

File tree

1 file changed: +3 -3 lines changed

convert_hf_to_gguf.py

Lines changed: 3 additions & 3 deletions
@@ -6465,7 +6465,7 @@ def set_vocab(self):
             if len(token) == 1:
                 continue
             merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
-            if len(merged) == 2: #todo this is an assert in Qwen, why?
+            if len(merged) == 2: # todo this is an assert in Qwen, why?
                 merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))
 
         # 3. Generate the tokens and toktypes lists
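The hunk above reverse-engineers the BPE merges list from a tiktoken-style mergeable_ranks table: for each multi-byte token, BPE is re-run while only allowing merges ranked below the token's own rank, which leaves the two pieces that the token's merge rule would join. The snippet below is a standalone sketch of that idea with made-up ranks, not the converter's QwenModel.bpe.

# Standalone sketch (not from convert_hf_to_gguf.py): recover a merge pair by
# running byte-level BPE while only allowing merges whose rank is below max_rank.
def bpe_split(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int) -> list[bytes]:
    parts = [bytes([b]) for b in token]
    while True:
        best = None  # (rank, index) of the lowest-ranked adjacent pair still allowed
        for i in range(len(parts) - 1):
            rank = mergeable_ranks.get(parts[i] + parts[i + 1])
            if rank is not None and rank < max_rank and (best is None or rank < best[0]):
                best = (rank, i)
        if best is None:
            return parts
        i = best[1]
        parts = parts[:i] + [parts[i] + parts[i + 1]] + parts[i + 2:]

# Toy ranks, purely illustrative: the pair that forms b"abb" is (b"ab", b"b").
ranks = {b"ab": 0, b"abb": 1}
assert bpe_split(ranks, b"abb", max_rank=ranks[b"abb"]) == [b"ab", b"b"]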
@@ -6527,11 +6527,11 @@ def set_gguf_parameters(self):
             alpha = rope_scaling.get("alpha", 1000)
             base = hparams.get("rope_theta", 10000.0)
             dim = (hparams["hidden_size"] // hparams["num_attention_heads"]) # 128
-            scaled_base = base * (alpha ** (dim / (dim-2))) # 10000 * (1000 ** (128 / 126)) = 11158839.9251
+            scaled_base = base * (alpha ** (dim / (dim - 2))) # 10000 * (1000 ** (128 / 126)) = 11158839.9251
             self.gguf_writer.add_rope_freq_base(scaled_base)
             self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
             self.gguf_writer.add_rope_scaling_factor(1)
-            #There is no consistent way to calculate ctx from alpha, and the config is incorrectly set to 32k
+            # There is no consistent way to calculate ctx from alpha, and the config is incorrectly set to 32k
             self.gguf_writer.add_rope_scaling_orig_ctx_len(256 * 1024) # 256k context length
             self.gguf_writer.add_context_length(256 * 1024) # 256k context length
 
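The scaled_base line applies NTK-aware alpha scaling to the RoPE base. The value quoted in the diff's own comment can be checked with plain arithmetic; the snippet below is a standalone check using the numbers the hunk itself states (dim = 128, alpha = 1000, rope_theta = 10000.0), not converter code.

# Standalone check of the NTK-alpha rescaled RoPE base from the hunk above.
base = 10000.0   # rope_theta
alpha = 1000
dim = 128        # hidden_size // num_attention_heads
scaled_base = base * (alpha ** (dim / (dim - 2)))
print(round(scaled_base, 4))  # ~11158839.9251, as noted in the diff comment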
