
Commit 95280c0

whitespace cleanup
1 parent 09739df commit 95280c0


4 files changed: +9 -8 lines changed


convert_hf_to_gguf.py

Lines changed: 3 additions & 2 deletions
@@ -7186,11 +7186,12 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
 
         return super().modify_tensors(data_torch, name, bid)
 
+
 @ModelBase.register("PanguEmbeddedForCausalLM")
 class PanguEmbeddedModel(TextModel):
     model_arch = gguf.MODEL_ARCH.PANGU_EMBED
-
-    def set_vocab(self):
+
+    def set_vocab(self):
         self._set_vocab_sentencepiece()
 
         tokenizer_config_file = self.dir_model / 'tokenizer_config.json'

src/llama-chat.cpp

Lines changed: 1 addition & 1 deletion
@@ -820,7 +820,7 @@ int32_t llm_chat_apply_template(
         // [unused9]系统:xxx[unused10]
         // [unused9]用户:xxx[unused10]
         // [unused9]助手:xxx[unused10]
-        // ...
+        // ...
         for (size_t i = 0; i < chat.size(); ++i) {
             const auto & msg = chat[i];
             const std::string & role = msg->role;
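In the comment block above, 系统, 用户, and 助手 are the Chinese labels for the system, user, and assistant roles. As a rough illustration only, here is a minimal sketch of rendering a message list in the format that comment documents; the struct and helper name are hypothetical, and this is not the actual llm_chat_apply_template implementation.

```cpp
#include <string>
#include <vector>

// Hypothetical helper (not part of llama.cpp): builds a prompt in the format
// documented by the comment above:
//   [unused9]系统:xxx[unused10]   (system)
//   [unused9]用户:xxx[unused10]   (user)
//   [unused9]助手:xxx[unused10]   (assistant)
struct chat_msg {
    std::string role;    // "system", "user" or "assistant"
    std::string content;
};

static std::string render_pangu_prompt(const std::vector<chat_msg> & chat) {
    std::string prompt;
    for (const chat_msg & msg : chat) {
        // map the English role name to the Chinese label used by the template
        std::string label = "助手";                  // assistant (default)
        if (msg.role == "system") { label = "系统"; } // system
        if (msg.role == "user")   { label = "用户"; } // user
        prompt += "[unused9]" + label + ":" + msg.content + "[unused10]";
    }
    return prompt;
}
```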

src/llama-model.cpp

Lines changed: 1 addition & 1 deletion
@@ -6279,7 +6279,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     // output
                     output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
                     output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
-
+
                     // if output is NULL, init from the input tok embed
                     if (output == NULL) {
                         output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);

src/models/pangu-embedded.cpp

Lines changed: 4 additions & 4 deletions
@@ -53,16 +53,16 @@ llm_build_pangu_embedded::llm_build_pangu_embedded(const llama_model & model, co
                 ext_factor, attn_factor, beta_fast, beta_slow
             );
 
-            Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr,
-                n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+            Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr,
+                n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                 ext_factor, attn_factor, beta_fast, beta_slow
             );
 
             cb(Qcur, "Qcur", il);
             cb(Kcur, "Kcur", il);
             cb(Vcur, "Vcur", il);
 
-            cur = build_attn(inp_attn,
+            cur = build_attn(inp_attn,
                 model.layers[il].wo, model.layers[il].bo,
                 Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
         }

@@ -75,7 +75,7 @@ llm_build_pangu_embedded::llm_build_pangu_embedded(const llama_model & model, co
         ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
         cb(ffn_inp, "ffn_inp", il);
 
-        // feed-forward network
+        // feed-forward network
         cur = build_norm(ffn_inp,
             model.layers[il].ffn_norm, NULL,
             LLM_NORM_RMS, il);
