Skip to content

Commit 680757d

Browse files
committed
Refactor: wtype per tensor
1 parent fe2d867 commit 680757d

22 files changed

+212
-174
lines changed

clip.hpp

Lines changed: 28 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -533,9 +533,12 @@ class CLIPEmbeddings : public GGMLBlock {
533533
int64_t vocab_size;
534534
int64_t num_positions;
535535

536-
void init_params(struct ggml_context* ctx, ggml_type wtype) {
537-
params["token_embedding.weight"] = ggml_new_tensor_2d(ctx, wtype, embed_dim, vocab_size);
538-
params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, embed_dim, num_positions);
536+
void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
537+
enum ggml_type token_wtype = (tensor_types.find(prefix + "token_embedding.weight") != tensor_types.end()) ? tensor_types[prefix + "token_embedding.weight"] : GGML_TYPE_F32;
538+
enum ggml_type position_wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "position_embedding.weight") != tensor_types.end()) ? tensor_types[prefix + "position_embedding.weight"] : GGML_TYPE_F32;
539+
540+
params["token_embedding.weight"] = ggml_new_tensor_2d(ctx, token_wtype, embed_dim, vocab_size);
541+
params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, position_wtype, embed_dim, num_positions);
539542
}
540543

541544
public:
@@ -579,11 +582,14 @@ class CLIPVisionEmbeddings : public GGMLBlock {
579582
int64_t image_size;
580583
int64_t num_patches;
581584
int64_t num_positions;
585+
void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
586+
enum ggml_type patch_wtype = GGML_TYPE_F16; // tensor_types.find(prefix + "patch_embedding.weight") != tensor_types.end() ? tensor_types[prefix + "patch_embedding.weight"] : GGML_TYPE_F16;
587+
enum ggml_type class_wtype = GGML_TYPE_F32; // tensor_types.find(prefix + "class_embedding") != tensor_types.end() ? tensor_types[prefix + "class_embedding"] : GGML_TYPE_F32;
588+
enum ggml_type position_wtype = GGML_TYPE_F32; // tensor_types.find(prefix + "position_embedding.weight") != tensor_types.end() ? tensor_types[prefix + "position_embedding.weight"] : GGML_TYPE_F32;
582589

583-
void init_params(struct ggml_context* ctx, ggml_type wtype) {
584-
params["patch_embedding.weight"] = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, patch_size, patch_size, num_channels, embed_dim);
585-
params["class_embedding"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, embed_dim);
586-
params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, embed_dim, num_positions);
590+
params["patch_embedding.weight"] = ggml_new_tensor_4d(ctx, patch_wtype, patch_size, patch_size, num_channels, embed_dim);
591+
params["class_embedding"] = ggml_new_tensor_1d(ctx, class_wtype, embed_dim);
592+
params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, position_wtype, embed_dim, num_positions);
587593
}
588594

589595
public:
@@ -639,9 +645,10 @@ enum CLIPVersion {
639645

640646
class CLIPTextModel : public GGMLBlock {
641647
protected:
642-
void init_params(struct ggml_context* ctx, ggml_type wtype) {
648+
void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
643649
if (version == OPEN_CLIP_VIT_BIGG_14) {
644-
params["text_projection"] = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, projection_dim, hidden_size);
650+
enum ggml_type wtype = GGML_TYPE_F32; // tensor_types.find(prefix + "text_projection") != tensor_types.end() ? tensor_types[prefix + "text_projection"] : GGML_TYPE_F32;
651+
params["text_projection"] = ggml_new_tensor_2d(ctx, wtype, projection_dim, hidden_size);
645652
}
646653
}
647654

@@ -712,7 +719,7 @@ class CLIPTextModel : public GGMLBlock {
712719
auto text_projection = params["text_projection"];
713720
ggml_tensor* pooled = ggml_view_1d(ctx, x, hidden_size, x->nb[1] * max_token_idx);
714721
if (text_projection != NULL) {
715-
pooled = ggml_nn_linear(ctx, pooled, text_projection, NULL);
722+
pooled = ggml_nn_linear(ctx, pooled, text_projection, NULL);
716723
} else {
717724
LOG_DEBUG("Missing text_projection matrix, assuming identity...");
718725
}
@@ -783,9 +790,9 @@ class CLIPProjection : public UnaryBlock {
783790
int64_t out_features;
784791
bool transpose_weight;
785792

786-
void init_params(struct ggml_context* ctx, ggml_type wtype) {
793+
void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
794+
enum ggml_type wtype = tensor_types.find(prefix + "weight") != tensor_types.end() ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
787795
if (transpose_weight) {
788-
LOG_ERROR("transpose_weight");
789796
params["weight"] = ggml_new_tensor_2d(ctx, wtype, out_features, in_features);
790797
} else {
791798
params["weight"] = ggml_new_tensor_2d(ctx, wtype, in_features, out_features);
@@ -846,12 +853,13 @@ struct CLIPTextModelRunner : public GGMLRunner {
846853
CLIPTextModel model;
847854

848855
CLIPTextModelRunner(ggml_backend_t backend,
849-
ggml_type wtype,
856+
std::map<std::string, enum ggml_type>& tensor_types,
857+
const std::string prefix,
850858
CLIPVersion version = OPENAI_CLIP_VIT_L_14,
851859
int clip_skip_value = 1,
852860
bool with_final_ln = true)
853-
: GGMLRunner(backend, wtype), model(version, clip_skip_value, with_final_ln) {
854-
model.init(params_ctx, wtype);
861+
: GGMLRunner(backend), model(version, clip_skip_value, with_final_ln) {
862+
model.init(params_ctx, tensor_types, prefix);
855863
}
856864

857865
std::string get_desc() {
@@ -893,13 +901,13 @@ struct CLIPTextModelRunner : public GGMLRunner {
893901
struct ggml_tensor* embeddings = NULL;
894902

895903
if (num_custom_embeddings > 0 && custom_embeddings_data != NULL) {
896-
auto custom_embeddings = ggml_new_tensor_2d(compute_ctx,
897-
wtype,
898-
model.hidden_size,
899-
num_custom_embeddings);
904+
auto token_embed_weight = model.get_token_embed_weight();
905+
auto custom_embeddings = ggml_new_tensor_2d(compute_ctx,
906+
token_embed_weight->type,
907+
model.hidden_size,
908+
num_custom_embeddings);
900909
set_backend_tensor_data(custom_embeddings, custom_embeddings_data);
901910

902-
auto token_embed_weight = model.get_token_embed_weight();
903911
// concatenate custom embeddings
904912
embeddings = ggml_concat(compute_ctx, token_embed_weight, custom_embeddings, 1);
905913
}

common.hpp

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -182,9 +182,11 @@ class GEGLU : public GGMLBlock {
182182
int64_t dim_in;
183183
int64_t dim_out;
184184

185-
void init_params(struct ggml_context* ctx, ggml_type wtype) {
185+
void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
186+
enum ggml_type wtype = (tensor_types.find(prefix + "proj.weight") != tensor_types.end()) ? tensor_types[prefix + "proj.weight"] : GGML_TYPE_F32;
187+
enum ggml_type bias_wtype = GGML_TYPE_F32;//(tensor_types.find(prefix + "proj.bias") != tensor_types.end()) ? tensor_types[prefix + "proj.bias"] : GGML_TYPE_F32;
186188
params["proj.weight"] = ggml_new_tensor_2d(ctx, wtype, dim_in, dim_out * 2);
187-
params["proj.bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, dim_out * 2);
189+
params["proj.bias"] = ggml_new_tensor_1d(ctx, bias_wtype, dim_out * 2);
188190
}
189191

190192
public:
@@ -433,8 +435,10 @@ class SpatialTransformer : public GGMLBlock {
433435

434436
class AlphaBlender : public GGMLBlock {
435437
protected:
436-
void init_params(struct ggml_context* ctx, ggml_type wtype) {
437-
params["mix_factor"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
438+
void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
439+
// Get the type of the "mix_factor" tensor from the input tensors map with the specified prefix
440+
enum ggml_type wtype = GGML_TYPE_F32;//(tensor_types.find(prefix + "mix_factor") != tensor_types.end()) ? tensor_types[prefix + "mix_factor"] : GGML_TYPE_F32;
441+
params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
438442
}
439443

440444
float get_alpha() {

conditioner.hpp

Lines changed: 24 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,6 @@ struct Conditioner {
4545
struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
4646
SDVersion version = VERSION_SD1;
4747
CLIPTokenizer tokenizer;
48-
ggml_type wtype;
4948
std::shared_ptr<CLIPTextModelRunner> text_model;
5049
std::shared_ptr<CLIPTextModelRunner> text_model2;
5150

@@ -56,24 +55,24 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
5655
std::vector<std::string> readed_embeddings;
5756

5857
FrozenCLIPEmbedderWithCustomWords(ggml_backend_t backend,
59-
ggml_type wtype,
58+
std::map<std::string, enum ggml_type>& tensor_types,
6059
const std::string& embd_dir,
6160
SDVersion version = VERSION_SD1,
6261
int clip_skip = -1)
63-
: version(version), tokenizer(version == VERSION_SD2 ? 0 : 49407), embd_dir(embd_dir), wtype(wtype) {
62+
: version(version), tokenizer(version == VERSION_SD2 ? 0 : 49407), embd_dir(embd_dir) {
6463
if (clip_skip <= 0) {
6564
clip_skip = 1;
6665
if (version == VERSION_SD2 || version == VERSION_SDXL) {
6766
clip_skip = 2;
6867
}
6968
}
7069
if (version == VERSION_SD1) {
71-
text_model = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip);
70+
text_model = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip);
7271
} else if (version == VERSION_SD2) {
73-
text_model = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPEN_CLIP_VIT_H_14, clip_skip);
72+
text_model = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "cond_stage_model.transformer.text_model", OPEN_CLIP_VIT_H_14, clip_skip);
7473
} else if (version == VERSION_SDXL) {
75-
text_model = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip, false);
76-
text_model2 = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPEN_CLIP_VIT_BIGG_14, clip_skip, false);
74+
text_model = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip, false);
75+
text_model2 = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "cond_stage_model.1.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, clip_skip, false);
7776
}
7877
}
7978

@@ -136,14 +135,14 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
136135
LOG_DEBUG("embedding wrong hidden size, got %i, expected %i", tensor_storage.ne[0], hidden_size);
137136
return false;
138137
}
139-
embd = ggml_new_tensor_2d(embd_ctx, wtype, hidden_size, tensor_storage.n_dims > 1 ? tensor_storage.ne[1] : 1);
138+
embd = ggml_new_tensor_2d(embd_ctx, tensor_storage.type, hidden_size, tensor_storage.n_dims > 1 ? tensor_storage.ne[1] : 1);
140139
*dst_tensor = embd;
141140
return true;
142141
};
143142
model_loader.load_tensors(on_load, NULL);
144143
readed_embeddings.push_back(embd_name);
145144
token_embed_custom.resize(token_embed_custom.size() + ggml_nbytes(embd));
146-
memcpy((void*)(token_embed_custom.data() + num_custom_embeddings * hidden_size * ggml_type_size(wtype)),
145+
memcpy((void*)(token_embed_custom.data() + num_custom_embeddings * hidden_size * ggml_type_size(embd->type)),
147146
embd->data,
148147
ggml_nbytes(embd));
149148
for (int i = 0; i < embd->ne[1]; i++) {
@@ -585,9 +584,9 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
585584
struct FrozenCLIPVisionEmbedder : public GGMLRunner {
586585
CLIPVisionModelProjection vision_model;
587586

588-
FrozenCLIPVisionEmbedder(ggml_backend_t backend, ggml_type wtype)
589-
: vision_model(OPEN_CLIP_VIT_H_14, true), GGMLRunner(backend, wtype) {
590-
vision_model.init(params_ctx, wtype);
587+
FrozenCLIPVisionEmbedder(ggml_backend_t backend, std::map<std::string, enum ggml_type>& tensor_types)
588+
: vision_model(OPEN_CLIP_VIT_H_14, true), GGMLRunner(backend) {
589+
vision_model.init(params_ctx, tensor_types, "cond_stage_model.transformer");
591590
}
592591

593592
std::string get_desc() {
@@ -622,7 +621,6 @@ struct FrozenCLIPVisionEmbedder : public GGMLRunner {
622621
};
623622

624623
struct SD3CLIPEmbedder : public Conditioner {
625-
ggml_type wtype;
626624
CLIPTokenizer clip_l_tokenizer;
627625
CLIPTokenizer clip_g_tokenizer;
628626
T5UniGramTokenizer t5_tokenizer;
@@ -631,15 +629,15 @@ struct SD3CLIPEmbedder : public Conditioner {
631629
std::shared_ptr<T5Runner> t5;
632630

633631
SD3CLIPEmbedder(ggml_backend_t backend,
634-
ggml_type wtype,
632+
std::map<std::string, enum ggml_type>& tensor_types,
635633
int clip_skip = -1)
636-
: wtype(wtype), clip_g_tokenizer(0) {
634+
: clip_g_tokenizer(0) {
637635
if (clip_skip <= 0) {
638636
clip_skip = 2;
639637
}
640-
clip_l = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip, false);
641-
clip_g = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPEN_CLIP_VIT_BIGG_14, clip_skip, false);
642-
t5 = std::make_shared<T5Runner>(backend, wtype);
638+
clip_l = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip, false);
639+
clip_g = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "text_encoders.clip_g.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, clip_skip, false);
640+
t5 = std::make_shared<T5Runner>(backend, tensor_types, "text_encoders.t5xxl.transformer");
643641
}
644642

645643
void set_clip_skip(int clip_skip) {
@@ -798,7 +796,7 @@ struct SD3CLIPEmbedder : public Conditioner {
798796
}
799797

800798
if (chunk_idx == 0) {
801-
auto it = std::find(chunk_tokens.begin(), chunk_tokens.end(), clip_l_tokenizer.EOS_TOKEN_ID);
799+
auto it = std::find(chunk_tokens.begin(), chunk_tokens.end(), clip_l_tokenizer.EOS_TOKEN_ID);
802800
max_token_idx = std::min<size_t>(std::distance(chunk_tokens.begin(), it), chunk_tokens.size() - 1);
803801
clip_l->compute(n_threads,
804802
input_ids,
@@ -808,7 +806,6 @@ struct SD3CLIPEmbedder : public Conditioner {
808806
true,
809807
&pooled_l,
810808
work_ctx);
811-
812809
}
813810
}
814811

@@ -848,7 +845,7 @@ struct SD3CLIPEmbedder : public Conditioner {
848845
}
849846

850847
if (chunk_idx == 0) {
851-
auto it = std::find(chunk_tokens.begin(), chunk_tokens.end(), clip_g_tokenizer.EOS_TOKEN_ID);
848+
auto it = std::find(chunk_tokens.begin(), chunk_tokens.end(), clip_g_tokenizer.EOS_TOKEN_ID);
852849
max_token_idx = std::min<size_t>(std::distance(chunk_tokens.begin(), it), chunk_tokens.size() - 1);
853850
clip_g->compute(n_threads,
854851
input_ids,
@@ -858,7 +855,6 @@ struct SD3CLIPEmbedder : public Conditioner {
858855
true,
859856
&pooled_g,
860857
work_ctx);
861-
862858
}
863859
}
864860

@@ -971,21 +967,19 @@ struct SD3CLIPEmbedder : public Conditioner {
971967
};
972968

973969
struct FluxCLIPEmbedder : public Conditioner {
974-
ggml_type wtype;
975970
CLIPTokenizer clip_l_tokenizer;
976971
T5UniGramTokenizer t5_tokenizer;
977972
std::shared_ptr<CLIPTextModelRunner> clip_l;
978973
std::shared_ptr<T5Runner> t5;
979974

980975
FluxCLIPEmbedder(ggml_backend_t backend,
981-
ggml_type wtype,
982-
int clip_skip = -1)
983-
: wtype(wtype) {
976+
std::map<std::string, enum ggml_type>& tensor_types,
977+
int clip_skip = -1) {
984978
if (clip_skip <= 0) {
985979
clip_skip = 2;
986980
}
987-
clip_l = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip, true);
988-
t5 = std::make_shared<T5Runner>(backend, wtype);
981+
clip_l = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip, true);
982+
t5 = std::make_shared<T5Runner>(backend, tensor_types, "text_encoders.t5xxl.transformer");
989983
}
990984

991985
void set_clip_skip(int clip_skip) {
@@ -1096,9 +1090,9 @@ struct FluxCLIPEmbedder : public Conditioner {
10961090
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, chunk_tokens);
10971091
size_t max_token_idx = 0;
10981092

1099-
auto it = std::find(chunk_tokens.begin(), chunk_tokens.end(), clip_l_tokenizer.EOS_TOKEN_ID);
1093+
auto it = std::find(chunk_tokens.begin(), chunk_tokens.end(), clip_l_tokenizer.EOS_TOKEN_ID);
11001094
max_token_idx = std::min<size_t>(std::distance(chunk_tokens.begin(), it), chunk_tokens.size() - 1);
1101-
1095+
11021096
clip_l->compute(n_threads,
11031097
input_ids,
11041098
0,
@@ -1107,7 +1101,6 @@ struct FluxCLIPEmbedder : public Conditioner {
11071101
true,
11081102
&pooled,
11091103
work_ctx);
1110-
11111104
}
11121105

11131106
// t5

control.hpp

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -317,10 +317,12 @@ struct ControlNet : public GGMLRunner {
317317
bool guided_hint_cached = false;
318318

319319
ControlNet(ggml_backend_t backend,
320-
ggml_type wtype,
321320
SDVersion version = VERSION_SD1)
322-
: GGMLRunner(backend, wtype), control_net(version) {
323-
control_net.init(params_ctx, wtype);
321+
: GGMLRunner(backend), control_net(version) {
322+
}
323+
324+
void init_params(std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix) {
325+
control_net.init(params_ctx, tensor_types, prefix);
324326
}
325327

326328
~ControlNet() {

diffusion_model.hpp

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -30,9 +30,10 @@ struct UNetModel : public DiffusionModel {
3030
UNetModelRunner unet;
3131

3232
UNetModel(ggml_backend_t backend,
33-
ggml_type wtype,
33+
std::map<std::string, enum ggml_type>& tensor_types,
3434
SDVersion version = VERSION_SD1)
35-
: unet(backend, wtype, version) {
35+
: unet(backend, version) {
36+
unet.init_params(tensor_types, "model.diffusion_model");
3637
}
3738

3839
void alloc_params_buffer() {
@@ -79,9 +80,9 @@ struct MMDiTModel : public DiffusionModel {
7980
MMDiTRunner mmdit;
8081

8182
MMDiTModel(ggml_backend_t backend,
82-
ggml_type wtype,
83+
std::map<std::string, enum ggml_type>& tensor_types,
8384
SDVersion version = VERSION_SD3_2B)
84-
: mmdit(backend, wtype, version) {
85+
: mmdit(backend, tensor_types, "model.diffusion_model", version) {
8586
}
8687

8788
void alloc_params_buffer() {
@@ -128,9 +129,9 @@ struct FluxModel : public DiffusionModel {
128129
Flux::FluxRunner flux;
129130

130131
FluxModel(ggml_backend_t backend,
131-
ggml_type wtype,
132+
std::map<std::string, enum ggml_type>& tensor_types,
132133
SDVersion version = VERSION_FLUX_DEV)
133-
: flux(backend, wtype, version) {
134+
: flux(backend, tensor_types, "model.diffusion_model", version) {
134135
}
135136

136137
void alloc_params_buffer() {

0 commit comments

Comments
 (0)