
Commit 105e821

cont [no ci]
1 parent 91a3530 commit 105e821

2 files changed: +21, -25 lines


examples/tts/convert_pt_to_hf.py

Lines changed: 0 additions & 4 deletions
@@ -172,10 +172,6 @@ def flatten_state_dict(state_dict, parent_key='', sep='.'):
         "n_embd": 768,
         "n_layer": 12
     },
-    #"n_embd_posnet": 768,
-    #"n_embd_convnext": 768,
-    #"n_layer_posnet": 6,
-    #"n_layer_convnext": 12
 }

 with open(path_dst + '/config.json', 'w') as f:

src/llama.cpp

Lines changed: 21 additions & 21 deletions
@@ -2747,37 +2747,37 @@ struct llama_cparams {

 struct llama_layer_posnet {
     // resnet
-    struct ggml_tensor * norm1 = nullptr;
-    struct ggml_tensor * norm1_b = nullptr;
+    struct ggml_tensor * norm1;
+    struct ggml_tensor * norm1_b;

-    struct ggml_tensor * conv1 = nullptr;
-    struct ggml_tensor * conv1_b = nullptr;
+    struct ggml_tensor * conv1;
+    struct ggml_tensor * conv1_b;

-    struct ggml_tensor * norm2 = nullptr;
-    struct ggml_tensor * norm2_b = nullptr;
+    struct ggml_tensor * norm2;
+    struct ggml_tensor * norm2_b;

-    struct ggml_tensor * conv2 = nullptr;
-    struct ggml_tensor * conv2_b = nullptr;
+    struct ggml_tensor * conv2;
+    struct ggml_tensor * conv2_b;

     // attention
-    struct ggml_tensor * attn_norm = nullptr;
-    struct ggml_tensor * attn_norm_b = nullptr;
+    struct ggml_tensor * attn_norm;
+    struct ggml_tensor * attn_norm_b;

-    struct ggml_tensor * attn_q = nullptr;
-    struct ggml_tensor * attn_q_b = nullptr;
+    struct ggml_tensor * attn_q;
+    struct ggml_tensor * attn_q_b;

-    struct ggml_tensor * attn_k = nullptr;
-    struct ggml_tensor * attn_k_b = nullptr;
+    struct ggml_tensor * attn_k;
+    struct ggml_tensor * attn_k_b;

-    struct ggml_tensor * attn_v = nullptr;
-    struct ggml_tensor * attn_v_b = nullptr;
+    struct ggml_tensor * attn_v;
+    struct ggml_tensor * attn_v_b;

-    struct ggml_tensor * attn_o = nullptr;
-    struct ggml_tensor * attn_o_b = nullptr;
+    struct ggml_tensor * attn_o;
+    struct ggml_tensor * attn_o_b;

     // normalize
-    struct ggml_tensor * norm = nullptr;
-    struct ggml_tensor * norm_b = nullptr;
+    struct ggml_tensor * norm;
+    struct ggml_tensor * norm_b;
 };

 struct llama_layer_convnext {
@@ -2796,7 +2796,6 @@ struct llama_layer_convnext {
     struct ggml_tensor * gamma;
 };

-// TODO: separate into "llama_layer_enc" and "llama_layer_dec"
 struct llama_layer {
     llama_layer() {
         // initialize all pointers to NULL
@@ -3167,6 +3166,7 @@ struct llama_sbatch {
     // batch indices of the output
     std::vector<size_t> out_ids;
     std::vector<llama_sbatch_seq> seq;
+
     const llama_batch * batch = nullptr;

     // buffers for the ubatch
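The bulk of this diff trades in-class default member initializers (= nullptr) for bare pointer declarations; the surrounding context in the second hunk shows a llama_layer() constructor commented "initialize all pointers to NULL", hinting that the zeroing is centralized rather than spelled out per member. Below is a minimal, self-contained sketch of the two styles and their consequences. It is illustrative only: layer_with_defaults, layer_bare, and the demo program are hypothetical stand-ins, not code from this commit.

#include <cstdio>

struct ggml_tensor; // opaque forward declaration, mirroring ggml

// Style on the '-' side of the hunk: each member carries its own default.
struct layer_with_defaults {
    ggml_tensor * norm1   = nullptr;
    ggml_tensor * norm1_b = nullptr;
};

// Style on the '+' side: bare members. They are null only if zeroed
// elsewhere -- e.g. by a constructor like the "initialize all pointers
// to NULL" llama_layer() shown above, or by value-initialization at the
// construction site, as below.
struct layer_bare {
    ggml_tensor * norm1;
    ggml_tensor * norm1_b;
};

int main() {
    layer_with_defaults a;  // in-class defaults: members start as nullptr
    layer_bare b = {};      // aggregate value-initialization zeroes all members
    // layer_bare c;        // plain default-initialization would leave them indeterminate

    std::printf("a.norm1 null: %d, b.norm1 null: %d\n",
                a.norm1 == nullptr, b.norm1 == nullptr);
    return 0;
}

Both routes end with null pointers; moving the zeroing out of the member declarations removes per-member noise from the struct, at the cost of relying on a constructor (or on every construction site) to perform it.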
