From 6cbcbe0d94bbccccb19b27ff2400e3880f6aade3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 1 Nov 2024 00:59:08 +0100 Subject: [PATCH 01/12] Refactor: wtype per tensor --- clip.hpp | 46 +++++++++++++++++------------ common.hpp | 12 +++++--- conditioner.hpp | 44 +++++++++++++--------------- control.hpp | 8 ++++-- diffusion_model.hpp | 13 +++++---- esrgan.hpp | 9 +++--- examples/cli/main.cpp | 3 +- flux.hpp | 16 ++++++----- ggml_extend.hpp | 67 ++++++++++++++++++++++++++----------------- lora.hpp | 5 ++-- mmdit.hpp | 21 ++++++++------ model.cpp | 6 +++- model.h | 4 ++- pmid.hpp | 6 ++-- stable-diffusion.cpp | 39 +++++++++++++------------ stable-diffusion.h | 3 +- t5.hpp | 31 +++++++++++--------- tae.hpp | 8 +++--- unet.hpp | 8 ++++-- upscaler.cpp | 5 ++-- vae.hpp | 12 ++++---- 21 files changed, 203 insertions(+), 163 deletions(-) diff --git a/clip.hpp b/clip.hpp index 46e52ada4..cfc4cb38c 100644 --- a/clip.hpp +++ b/clip.hpp @@ -545,9 +545,12 @@ class CLIPEmbeddings : public GGMLBlock { int64_t vocab_size; int64_t num_positions; - void init_params(struct ggml_context* ctx, ggml_type wtype) { - params["token_embedding.weight"] = ggml_new_tensor_2d(ctx, wtype, embed_dim, vocab_size); - params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, embed_dim, num_positions); + void init_params(struct ggml_context* ctx, std::map& tensor_types, const std::string prefix = "") { + enum ggml_type token_wtype = (tensor_types.find(prefix + "token_embedding.weight") != tensor_types.end()) ? tensor_types[prefix + "token_embedding.weight"] : GGML_TYPE_F32; + enum ggml_type position_wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "position_embedding.weight") != tensor_types.end()) ? tensor_types[prefix + "position_embedding.weight"] : GGML_TYPE_F32; + + params["token_embedding.weight"] = ggml_new_tensor_2d(ctx, token_wtype, embed_dim, vocab_size); + params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, position_wtype, embed_dim, num_positions); } public: @@ -591,11 +594,14 @@ class CLIPVisionEmbeddings : public GGMLBlock { int64_t image_size; int64_t num_patches; int64_t num_positions; + void init_params(struct ggml_context* ctx, std::map& tensor_types, const std::string prefix = "") { + enum ggml_type patch_wtype = GGML_TYPE_F16; // tensor_types.find(prefix + "patch_embedding.weight") != tensor_types.end() ? tensor_types[prefix + "patch_embedding.weight"] : GGML_TYPE_F16; + enum ggml_type class_wtype = GGML_TYPE_F32; // tensor_types.find(prefix + "class_embedding") != tensor_types.end() ? tensor_types[prefix + "class_embedding"] : GGML_TYPE_F32; + enum ggml_type position_wtype = GGML_TYPE_F32; // tensor_types.find(prefix + "position_embedding.weight") != tensor_types.end() ? 
tensor_types[prefix + "position_embedding.weight"] : GGML_TYPE_F32; - void init_params(struct ggml_context* ctx, ggml_type wtype) { - params["patch_embedding.weight"] = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, patch_size, patch_size, num_channels, embed_dim); - params["class_embedding"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, embed_dim); - params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, embed_dim, num_positions); + params["patch_embedding.weight"] = ggml_new_tensor_4d(ctx, patch_wtype, patch_size, patch_size, num_channels, embed_dim); + params["class_embedding"] = ggml_new_tensor_1d(ctx, class_wtype, embed_dim); + params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, position_wtype, embed_dim, num_positions); } public: @@ -651,9 +657,10 @@ enum CLIPVersion { class CLIPTextModel : public GGMLBlock { protected: - void init_params(struct ggml_context* ctx, ggml_type wtype) { + void init_params(struct ggml_context* ctx, std::map& tensor_types, const std::string prefix = "") { if (version == OPEN_CLIP_VIT_BIGG_14) { - params["text_projection"] = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, projection_dim, hidden_size); + enum ggml_type wtype = GGML_TYPE_F32; // tensor_types.find(prefix + "text_projection") != tensor_types.end() ? tensor_types[prefix + "text_projection"] : GGML_TYPE_F32; + params["text_projection"] = ggml_new_tensor_2d(ctx, wtype, projection_dim, hidden_size); } } @@ -798,9 +805,9 @@ class CLIPProjection : public UnaryBlock { int64_t out_features; bool transpose_weight; - void init_params(struct ggml_context* ctx, ggml_type wtype) { + void init_params(struct ggml_context* ctx, std::map& tensor_types, const std::string prefix = "") { + enum ggml_type wtype = tensor_types.find(prefix + "weight") != tensor_types.end() ? 
tensor_types[prefix + "weight"] : GGML_TYPE_F32; if (transpose_weight) { - LOG_ERROR("transpose_weight"); params["weight"] = ggml_new_tensor_2d(ctx, wtype, out_features, in_features); } else { params["weight"] = ggml_new_tensor_2d(ctx, wtype, in_features, out_features); @@ -861,12 +868,13 @@ struct CLIPTextModelRunner : public GGMLRunner { CLIPTextModel model; CLIPTextModelRunner(ggml_backend_t backend, - ggml_type wtype, + std::map& tensor_types, + const std::string prefix, CLIPVersion version = OPENAI_CLIP_VIT_L_14, int clip_skip_value = 1, bool with_final_ln = true) - : GGMLRunner(backend, wtype), model(version, clip_skip_value, with_final_ln) { - model.init(params_ctx, wtype); + : GGMLRunner(backend), model(version, clip_skip_value, with_final_ln) { + model.init(params_ctx, tensor_types, prefix); } std::string get_desc() { @@ -908,13 +916,13 @@ struct CLIPTextModelRunner : public GGMLRunner { struct ggml_tensor* embeddings = NULL; if (num_custom_embeddings > 0 && custom_embeddings_data != NULL) { - auto custom_embeddings = ggml_new_tensor_2d(compute_ctx, - wtype, - model.hidden_size, - num_custom_embeddings); + auto token_embed_weight = model.get_token_embed_weight(); + auto custom_embeddings = ggml_new_tensor_2d(compute_ctx, + token_embed_weight->type, + model.hidden_size, + num_custom_embeddings); set_backend_tensor_data(custom_embeddings, custom_embeddings_data); - auto token_embed_weight = model.get_token_embed_weight(); // concatenate custom embeddings embeddings = ggml_concat(compute_ctx, token_embed_weight, custom_embeddings, 1); } diff --git a/common.hpp b/common.hpp index 1ca6b8d0d..da8353b36 100644 --- a/common.hpp +++ b/common.hpp @@ -182,9 +182,11 @@ class GEGLU : public GGMLBlock { int64_t dim_in; int64_t dim_out; - void init_params(struct ggml_context* ctx, ggml_type wtype) { + void init_params(struct ggml_context* ctx, std::map& tensor_types, std::string prefix = "") { + enum ggml_type wtype = (tensor_types.find(prefix + "proj.weight") != tensor_types.end()) ? tensor_types[prefix + "proj.weight"] : GGML_TYPE_F32; + enum ggml_type bias_wtype = GGML_TYPE_F32;//(tensor_types.find(prefix + "proj.bias") != tensor_types.end()) ? tensor_types[prefix + "proj.bias"] : GGML_TYPE_F32; params["proj.weight"] = ggml_new_tensor_2d(ctx, wtype, dim_in, dim_out * 2); - params["proj.bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, dim_out * 2); + params["proj.bias"] = ggml_new_tensor_1d(ctx, bias_wtype, dim_out * 2); } public: @@ -438,8 +440,10 @@ class SpatialTransformer : public GGMLBlock { class AlphaBlender : public GGMLBlock { protected: - void init_params(struct ggml_context* ctx, ggml_type wtype) { - params["mix_factor"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); + void init_params(struct ggml_context* ctx, std::map& tensor_types, std::string prefix = "") { + // Get the type of the "mix_factor" tensor from the input tensors map with the specified prefix + enum ggml_type wtype = GGML_TYPE_F32;//(tensor_types.ypes.find(prefix + "mix_factor") != tensor_types.end()) ? 
tensor_types[prefix + "mix_factor"] : GGML_TYPE_F32; + params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1); } float get_alpha() { diff --git a/conditioner.hpp b/conditioner.hpp index 9a6300997..5b3f20dd1 100644 --- a/conditioner.hpp +++ b/conditioner.hpp @@ -46,7 +46,6 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { SDVersion version = VERSION_SD1; PMVersion pm_version = PM_VERSION_1; CLIPTokenizer tokenizer; - ggml_type wtype; std::shared_ptr text_model; std::shared_ptr text_model2; @@ -57,12 +56,12 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { std::vector readed_embeddings; FrozenCLIPEmbedderWithCustomWords(ggml_backend_t backend, - ggml_type wtype, + std::map& tensor_types, const std::string& embd_dir, SDVersion version = VERSION_SD1, PMVersion pv = PM_VERSION_1, int clip_skip = -1) - : version(version), pm_version(pv), tokenizer(version == VERSION_SD2 ? 0 : 49407), embd_dir(embd_dir), wtype(wtype) { + : version(version), pm_version(pv), tokenizer(version == VERSION_SD2 ? 0 : 49407), embd_dir(embd_dir) { if (clip_skip <= 0) { clip_skip = 1; if (version == VERSION_SD2 || version == VERSION_SDXL) { @@ -70,12 +69,12 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { } } if (version == VERSION_SD1) { - text_model = std::make_shared(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip); + text_model = std::make_shared(backend, tensor_types, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip); } else if (version == VERSION_SD2) { - text_model = std::make_shared(backend, wtype, OPEN_CLIP_VIT_H_14, clip_skip); + text_model = std::make_shared(backend, tensor_types, "cond_stage_model.transformer.text_model", OPEN_CLIP_VIT_H_14, clip_skip); } else if (version == VERSION_SDXL) { - text_model = std::make_shared(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip, false); - text_model2 = std::make_shared(backend, wtype, OPEN_CLIP_VIT_BIGG_14, clip_skip, false); + text_model = std::make_shared(backend, tensor_types, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip, false); + text_model2 = std::make_shared(backend, tensor_types, "cond_stage_model.1.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, clip_skip, false); } } @@ -138,14 +137,14 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { LOG_DEBUG("embedding wrong hidden size, got %i, expected %i", tensor_storage.ne[0], hidden_size); return false; } - embd = ggml_new_tensor_2d(embd_ctx, wtype, hidden_size, tensor_storage.n_dims > 1 ? tensor_storage.ne[1] : 1); + embd = ggml_new_tensor_2d(embd_ctx, tensor_storage.type, hidden_size, tensor_storage.n_dims > 1 ? 
tensor_storage.ne[1] : 1); *dst_tensor = embd; return true; }; model_loader.load_tensors(on_load, NULL); readed_embeddings.push_back(embd_name); token_embed_custom.resize(token_embed_custom.size() + ggml_nbytes(embd)); - memcpy((void*)(token_embed_custom.data() + num_custom_embeddings * hidden_size * ggml_type_size(wtype)), + memcpy((void*)(token_embed_custom.data() + num_custom_embeddings * hidden_size * ggml_type_size(embd->type)), embd->data, ggml_nbytes(embd)); for (int i = 0; i < embd->ne[1]; i++) { @@ -590,9 +589,9 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { struct FrozenCLIPVisionEmbedder : public GGMLRunner { CLIPVisionModelProjection vision_model; - FrozenCLIPVisionEmbedder(ggml_backend_t backend, ggml_type wtype) - : vision_model(OPEN_CLIP_VIT_H_14, true), GGMLRunner(backend, wtype) { - vision_model.init(params_ctx, wtype); + FrozenCLIPVisionEmbedder(ggml_backend_t backend, std::map& tensor_types) + : vision_model(OPEN_CLIP_VIT_H_14, true), GGMLRunner(backend) { + vision_model.init(params_ctx, tensor_types, "cond_stage_model.transformer"); } std::string get_desc() { @@ -627,7 +626,6 @@ struct FrozenCLIPVisionEmbedder : public GGMLRunner { }; struct SD3CLIPEmbedder : public Conditioner { - ggml_type wtype; CLIPTokenizer clip_l_tokenizer; CLIPTokenizer clip_g_tokenizer; T5UniGramTokenizer t5_tokenizer; @@ -636,15 +634,15 @@ struct SD3CLIPEmbedder : public Conditioner { std::shared_ptr t5; SD3CLIPEmbedder(ggml_backend_t backend, - ggml_type wtype, + std::map& tensor_types, int clip_skip = -1) - : wtype(wtype), clip_g_tokenizer(0) { + : clip_g_tokenizer(0) { if (clip_skip <= 0) { clip_skip = 2; } - clip_l = std::make_shared(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip, false); - clip_g = std::make_shared(backend, wtype, OPEN_CLIP_VIT_BIGG_14, clip_skip, false); - t5 = std::make_shared(backend, wtype); + clip_l = std::make_shared(backend, tensor_types, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip, false); + clip_g = std::make_shared(backend, tensor_types, "text_encoders.clip_g.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, clip_skip, false); + t5 = std::make_shared(backend, tensor_types, "text_encoders.t5xxl.transformer"); } void set_clip_skip(int clip_skip) { @@ -974,21 +972,19 @@ struct SD3CLIPEmbedder : public Conditioner { }; struct FluxCLIPEmbedder : public Conditioner { - ggml_type wtype; CLIPTokenizer clip_l_tokenizer; T5UniGramTokenizer t5_tokenizer; std::shared_ptr clip_l; std::shared_ptr t5; FluxCLIPEmbedder(ggml_backend_t backend, - ggml_type wtype, - int clip_skip = -1) - : wtype(wtype) { + std::map& tensor_types, + int clip_skip = -1) { if (clip_skip <= 0) { clip_skip = 2; } - clip_l = std::make_shared(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip, true); - t5 = std::make_shared(backend, wtype); + clip_l = std::make_shared(backend, tensor_types, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip, true); + t5 = std::make_shared(backend, tensor_types, "text_encoders.t5xxl.transformer"); } void set_clip_skip(int clip_skip) { diff --git a/control.hpp b/control.hpp index 41f31acb7..3b2c1e1b9 100644 --- a/control.hpp +++ b/control.hpp @@ -317,10 +317,12 @@ struct ControlNet : public GGMLRunner { bool guided_hint_cached = false; ControlNet(ggml_backend_t backend, - ggml_type wtype, SDVersion version = VERSION_SD1) - : GGMLRunner(backend, wtype), control_net(version) { - control_net.init(params_ctx, wtype); + : GGMLRunner(backend), control_net(version) { + } + + void init_params(std::map& 
tensor_types, const std::string prefix) { + control_net.init(params_ctx, tensor_types, prefix); } ~ControlNet() { diff --git a/diffusion_model.hpp b/diffusion_model.hpp index eb433b614..8e5e2a6f5 100644 --- a/diffusion_model.hpp +++ b/diffusion_model.hpp @@ -31,10 +31,11 @@ struct UNetModel : public DiffusionModel { UNetModelRunner unet; UNetModel(ggml_backend_t backend, - ggml_type wtype, + std::map& tensor_types, SDVersion version = VERSION_SD1, bool flash_attn = false) - : unet(backend, wtype, version, flash_attn) { + : unet(backend, version, flash_attn) { + unet.init_params(tensor_types, "model.diffusion_model"); } void alloc_params_buffer() { @@ -83,9 +84,9 @@ struct MMDiTModel : public DiffusionModel { MMDiTRunner mmdit; MMDiTModel(ggml_backend_t backend, - ggml_type wtype, + std::map& tensor_types, SDVersion version = VERSION_SD3_2B) - : mmdit(backend, wtype, version) { + : mmdit(backend, tensor_types, "model.diffusion_model", version) { } void alloc_params_buffer() { @@ -133,10 +134,10 @@ struct FluxModel : public DiffusionModel { Flux::FluxRunner flux; FluxModel(ggml_backend_t backend, - ggml_type wtype, + std::map& tensor_types, SDVersion version = VERSION_FLUX_DEV, bool flash_attn = false) - : flux(backend, wtype, version, flash_attn) { + : flux(backend, tensor_types, "model.diffusion_model", version, flash_attn) { } void alloc_params_buffer() { diff --git a/esrgan.hpp b/esrgan.hpp index 33fcf09a4..1f39abc67 100644 --- a/esrgan.hpp +++ b/esrgan.hpp @@ -142,10 +142,11 @@ struct ESRGAN : public GGMLRunner { int scale = 4; int tile_size = 128; // avoid cuda OOM for 4gb VRAM - ESRGAN(ggml_backend_t backend, - ggml_type wtype) - : GGMLRunner(backend, wtype) { - rrdb_net.init(params_ctx, wtype); + ESRGAN(ggml_backend_t backend) + : GGMLRunner(backend) { + } + void init_params(std::map& tensor_types, const std::string prefix) { + rrdb_net.init(params_ctx, tensor_types, prefix); } std::string get_desc() { diff --git a/examples/cli/main.cpp b/examples/cli/main.cpp index 59b325504..93e6d3e43 100644 --- a/examples/cli/main.cpp +++ b/examples/cli/main.cpp @@ -1010,8 +1010,7 @@ int main(int argc, const char* argv[]) { int upscale_factor = 4; // unused for RealESRGAN_x4plus_anime_6B.pth if (params.esrgan_path.size() > 0 && params.upscale_repeats > 0) { upscaler_ctx_t* upscaler_ctx = new_upscaler_ctx(params.esrgan_path.c_str(), - params.n_threads, - params.wtype); + params.n_threads); if (upscaler_ctx == NULL) { printf("new_upscaler_ctx failed\n"); diff --git a/flux.hpp b/flux.hpp index b2d0f57c2..6f85cb4b8 100644 --- a/flux.hpp +++ b/flux.hpp @@ -35,8 +35,9 @@ namespace Flux { int64_t hidden_size; float eps; - void init_params(struct ggml_context* ctx, ggml_type wtype) { - params["scale"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hidden_size); + void init_params(struct ggml_context* ctx, const std::string prefix, std::map& tensor_types, std::map& params) { + ggml_type wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "scale") != tensor_types.end()) ? 
tensor_types[prefix + "scale"] : GGML_TYPE_F32; + params["scale"] = ggml_new_tensor_1d(ctx, wtype, hidden_size); } public: @@ -829,10 +830,11 @@ namespace Flux { std::vector pe_vec; // for cache FluxRunner(ggml_backend_t backend, - ggml_type wtype, - SDVersion version = VERSION_FLUX_DEV, + std::map& tensor_types = std::map(), + const std::string prefix = "", + SDVersion version = VERSION_FLUX_DEV, bool flash_attn = false) - : GGMLRunner(backend, wtype) { + : GGMLRunner(backend) { flux_params.flash_attn = flash_attn; if (version == VERSION_FLUX_SCHNELL) { flux_params.guidance_embed = false; @@ -841,7 +843,7 @@ namespace Flux { flux_params.depth = 8; } flux = Flux(flux_params); - flux.init(params_ctx, wtype); + flux.init(params_ctx, tensor_types, prefix); } std::string get_desc() { @@ -959,7 +961,7 @@ namespace Flux { // ggml_backend_t backend = ggml_backend_cuda_init(0); ggml_backend_t backend = ggml_backend_cpu_init(); ggml_type model_data_type = GGML_TYPE_Q8_0; - std::shared_ptr flux = std::shared_ptr(new FluxRunner(backend, model_data_type)); + std::shared_ptr flux = std::shared_ptr(new FluxRunner(backend)); { LOG_INFO("loading from '%s'", file_path.c_str()); diff --git a/ggml_extend.hpp b/ggml_extend.hpp index e944deb69..8c84ba3f3 100644 --- a/ggml_extend.hpp +++ b/ggml_extend.hpp @@ -25,6 +25,8 @@ #include "ggml-cpu.h" #include "ggml.h" +#include "model.h" + #ifdef SD_USE_CUBLAS #include "ggml-cuda.h" #endif @@ -964,7 +966,6 @@ struct GGMLRunner { std::map backend_tensor_data_map; - ggml_type wtype = GGML_TYPE_F32; ggml_backend_t backend = NULL; void alloc_params_ctx() { @@ -1040,8 +1041,8 @@ struct GGMLRunner { public: virtual std::string get_desc() = 0; - GGMLRunner(ggml_backend_t backend, ggml_type wtype = GGML_TYPE_F32) - : backend(backend), wtype(wtype) { + GGMLRunner(ggml_backend_t backend) + : backend(backend) { alloc_params_ctx(); } @@ -1170,20 +1171,22 @@ class GGMLBlock { GGMLBlockMap blocks; ParameterMap params; - void init_blocks(struct ggml_context* ctx, ggml_type wtype) { + void init_blocks(struct ggml_context* ctx, std::map& tensor_types, const std::string prefix = "") { for (auto& pair : blocks) { auto& block = pair.second; - - block->init(ctx, wtype); + block->init(ctx, tensor_types, prefix + pair.first); } } - virtual void init_params(struct ggml_context* ctx, ggml_type wtype) {} + virtual void init_params(struct ggml_context* ctx, std::map& tensor_types, const std::string prefix = "") {} public: - void init(struct ggml_context* ctx, ggml_type wtype) { - init_blocks(ctx, wtype); - init_params(ctx, wtype); + void init(struct ggml_context* ctx, std::map& tensor_types, std::string prefix = "") { + if (prefix.size() > 0) { + prefix = prefix + "."; + } + init_blocks(ctx, tensor_types, prefix); + init_params(ctx, tensor_types, prefix); } size_t get_params_num() { @@ -1239,13 +1242,15 @@ class Linear : public UnaryBlock { bool bias; bool force_f32; - void init_params(struct ggml_context* ctx, ggml_type wtype) { + void init_params(struct ggml_context* ctx, std::map& tensor_types, const std::string prefix = "") { + enum ggml_type wtype = (tensor_types.find(prefix + "weight") != tensor_types.end()) ? 
tensor_types[prefix + "weight"] : GGML_TYPE_F32; if (in_features % ggml_blck_size(wtype) != 0 || force_f32) { wtype = GGML_TYPE_F32; } params["weight"] = ggml_new_tensor_2d(ctx, wtype, in_features, out_features); if (bias) { - params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, out_features); + enum ggml_type wtype = GGML_TYPE_F32; //(tensor_types.ypes.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32; + params["bias"] = ggml_new_tensor_1d(ctx, wtype, out_features); } } @@ -1273,9 +1278,9 @@ class Embedding : public UnaryBlock { protected: int64_t embedding_dim; int64_t num_embeddings; - - void init_params(struct ggml_context* ctx, ggml_type wtype) { - params["weight"] = ggml_new_tensor_2d(ctx, wtype, embedding_dim, num_embeddings); + void init_params(struct ggml_context* ctx, std::map& tensor_types, const std::string prefix = "") { + enum ggml_type wtype = (tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32; + params["weight"] = ggml_new_tensor_2d(ctx, wtype, embedding_dim, num_embeddings); } public: @@ -1313,10 +1318,12 @@ class Conv2d : public UnaryBlock { std::pair dilation; bool bias; - void init_params(struct ggml_context* ctx, ggml_type wtype) { - params["weight"] = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kernel_size.second, kernel_size.first, in_channels, out_channels); + void init_params(struct ggml_context* ctx, std::map& tensor_types, const std::string prefix = "") { + enum ggml_type wtype = GGML_TYPE_F16; //(tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F16; + params["weight"] = ggml_new_tensor_4d(ctx, wtype, kernel_size.second, kernel_size.first, in_channels, out_channels); if (bias) { - params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, out_channels); + enum ggml_type wtype = GGML_TYPE_F32; // (tensor_types.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32; + params["bias"] = ggml_new_tensor_1d(ctx, wtype, out_channels); } } @@ -1356,10 +1363,12 @@ class Conv3dnx1x1 : public UnaryBlock { int64_t dilation; bool bias; - void init_params(struct ggml_context* ctx, ggml_type wtype) { - params["weight"] = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, 1, kernel_size, in_channels, out_channels); // 5d => 4d + void init_params(struct ggml_context* ctx, std::map& tensor_types, const std::string prefix = "") { + enum ggml_type wtype = GGML_TYPE_F16; //(tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F16; + params["weight"] = ggml_new_tensor_4d(ctx, wtype, 1, kernel_size, in_channels, out_channels); // 5d => 4d if (bias) { - params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, out_channels); + enum ggml_type wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32; + params["bias"] = ggml_new_tensor_1d(ctx, wtype, out_channels); } } @@ -1398,11 +1407,13 @@ class LayerNorm : public UnaryBlock { bool elementwise_affine; bool bias; - void init_params(struct ggml_context* ctx, ggml_type wtype) { + void init_params(struct ggml_context* ctx, std::map& tensor_types, const std::string prefix = "") { if (elementwise_affine) { - params["weight"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, normalized_shape); + enum ggml_type wtype = GGML_TYPE_F32; //(tensor_types.ypes.find(prefix + "weight") != tensor_types.end()) ? 
tensor_types[prefix + "weight"] : GGML_TYPE_F32; + params["weight"] = ggml_new_tensor_1d(ctx, wtype, normalized_shape); if (bias) { - params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, normalized_shape); + enum ggml_type wtype = GGML_TYPE_F32; //(tensor_types.ypes.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32; + params["bias"] = ggml_new_tensor_1d(ctx, wtype, normalized_shape); } } } @@ -1438,10 +1449,12 @@ class GroupNorm : public GGMLBlock { float eps; bool affine; - void init_params(struct ggml_context* ctx, ggml_type wtype) { + void init_params(struct ggml_context* ctx, std::map& tensor_types, const std::string prefix = "") { if (affine) { - params["weight"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, num_channels); - params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, num_channels); + enum ggml_type wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32; + enum ggml_type bias_wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32; + params["weight"] = ggml_new_tensor_1d(ctx, wtype, num_channels); + params["bias"] = ggml_new_tensor_1d(ctx, bias_wtype, num_channels); } } diff --git a/lora.hpp b/lora.hpp index c44db7698..43daba48f 100644 --- a/lora.hpp +++ b/lora.hpp @@ -16,10 +16,9 @@ struct LoraModel : public GGMLRunner { ggml_tensor* zero_index = NULL; LoraModel(ggml_backend_t backend, - ggml_type wtype, const std::string& file_path = "", - const std::string& prefix = "") - : file_path(file_path), GGMLRunner(backend, wtype) { + const std::string prefix = "") + : file_path(file_path), GGMLRunner(backend) { if (!model_loader.init_from_file(file_path, prefix)) { load_failed = true; } diff --git a/mmdit.hpp b/mmdit.hpp index 35810bad9..253679354 100644 --- a/mmdit.hpp +++ b/mmdit.hpp @@ -147,8 +147,9 @@ class RMSNorm : public UnaryBlock { int64_t hidden_size; float eps; - void init_params(struct ggml_context* ctx, ggml_type wtype) { - params["weight"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hidden_size); + void init_params(struct ggml_context* ctx, std::map& tensor_types, std::string prefix = "") { + enum ggml_type wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32; + params["weight"] = ggml_new_tensor_1d(ctx, wtype, hidden_size); } public: @@ -652,8 +653,9 @@ struct MMDiT : public GGMLBlock { int64_t hidden_size; std::string qk_norm; - void init_params(struct ggml_context* ctx, ggml_type wtype) { - params["pos_embed"] = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, hidden_size, num_patchs, 1); + void init_params(struct ggml_context* ctx, std::map& tensor_types, std::string prefix = "") { + enum ggml_type wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "pos_embed") != tensor_types.end()) ? 
tensor_types[prefix + "pos_embed"] : GGML_TYPE_F32; + params["pos_embed"] = ggml_new_tensor_3d(ctx, wtype, hidden_size, num_patchs, 1); } public: @@ -875,10 +877,11 @@ struct MMDiTRunner : public GGMLRunner { MMDiT mmdit; MMDiTRunner(ggml_backend_t backend, - ggml_type wtype, - SDVersion version = VERSION_SD3_2B) - : GGMLRunner(backend, wtype), mmdit(version) { - mmdit.init(params_ctx, wtype); + std::map& tensor_types = std::map(), + const std::string prefix = "", + SDVersion version = VERSION_SD3_2B) + : GGMLRunner(backend), mmdit(version) { + mmdit.init(params_ctx, tensor_types, prefix); } std::string get_desc() { @@ -975,7 +978,7 @@ struct MMDiTRunner : public GGMLRunner { // ggml_backend_t backend = ggml_backend_cuda_init(0); ggml_backend_t backend = ggml_backend_cpu_init(); ggml_type model_data_type = GGML_TYPE_F16; - std::shared_ptr mmdit = std::shared_ptr(new MMDiTRunner(backend, model_data_type)); + std::shared_ptr mmdit = std::shared_ptr(new MMDiTRunner(backend)); { LOG_INFO("loading from '%s'", file_path.c_str()); diff --git a/model.cpp b/model.cpp index dba8187da..9e3719406 100644 --- a/model.cpp +++ b/model.cpp @@ -927,6 +927,7 @@ bool ModelLoader::init_from_gguf_file(const std::string& file_path, const std::s GGML_ASSERT(ggml_nbytes(dummy) == tensor_storage.nbytes()); tensor_storages.push_back(tensor_storage); + tensor_storages_types[tensor_storage.name] = tensor_storage.type; } gguf_free(ctx_gguf_); @@ -1071,6 +1072,7 @@ bool ModelLoader::init_from_safetensors_file(const std::string& file_path, const } tensor_storages.push_back(tensor_storage); + tensor_storages_types[tensor_storage.name] = tensor_storage.type; // LOG_DEBUG("%s %s", tensor_storage.to_string().c_str(), dtype.c_str()); } @@ -1296,7 +1298,7 @@ bool ModelLoader::parse_data_pkl(uint8_t* buffer, zip_t* zip, std::string dir, size_t file_index, - const std::string& prefix) { + const std::string prefix) { uint8_t* buffer_end = buffer + buffer_size; if (buffer[0] == 0x80) { // proto if (buffer[1] != 2) { @@ -1401,6 +1403,8 @@ bool ModelLoader::parse_data_pkl(uint8_t* buffer, // printf(" ZIP got tensor %s \n ", reader.tensor_storage.name.c_str()); reader.tensor_storage.name = prefix + reader.tensor_storage.name; tensor_storages.push_back(reader.tensor_storage); + tensor_storages_types[reader.tensor_storage.name] = reader.tensor_storage.type; + // LOG_DEBUG("%s", reader.tensor_storage.name.c_str()); // reset reader = PickleTensorReader(); diff --git a/model.h b/model.h index b7e3b3a2e..6100fa578 100644 --- a/model.h +++ b/model.h @@ -170,7 +170,7 @@ class ModelLoader { zip_t* zip, std::string dir, size_t file_index, - const std::string& prefix); + const std::string prefix); bool init_from_gguf_file(const std::string& file_path, const std::string& prefix = ""); bool init_from_safetensors_file(const std::string& file_path, const std::string& prefix = ""); @@ -178,6 +178,8 @@ class ModelLoader { bool init_from_diffusers_file(const std::string& file_path, const std::string& prefix = ""); public: + std::map tensor_storages_types; + bool init_from_file(const std::string& file_path, const std::string& prefix = ""); SDVersion get_sd_version(); ggml_type get_sd_wtype(); diff --git a/pmid.hpp b/pmid.hpp index defb4f05a..3d719a907 100644 --- a/pmid.hpp +++ b/pmid.hpp @@ -623,13 +623,13 @@ struct PhotoMakerIDEncoder : public GGMLRunner { std::vector zeros_right; public: - PhotoMakerIDEncoder(ggml_backend_t backend, ggml_type wtype, SDVersion version = VERSION_SDXL, PMVersion pm_v = PM_VERSION_1, float sty = 20.f) - : 
GGMLRunner(backend, wtype), + PhotoMakerIDEncoder(ggml_backend_t backend, std::map& tensor_types, const std::string prefix, SDVersion version = VERSION_SDXL, PMVersion pm_v = PM_VERSION_1, float sty = 20.f) + : GGMLRunner(backend), version(version), pm_version(pm_v), style_strength(sty) { if (pm_version == PM_VERSION_1) { - id_encoder.init(params_ctx, wtype); + id_encoder.init(params_ctx, tensor_types, prefix); } else if (pm_version == PM_VERSION_2) { id_encoder2.init(params_ctx, wtype); } diff --git a/stable-diffusion.cpp b/stable-diffusion.cpp index a276bff5c..721e516f8 100644 --- a/stable-diffusion.cpp +++ b/stable-diffusion.cpp @@ -270,10 +270,10 @@ class StableDiffusionGGML { vae_wtype = GGML_TYPE_F32; } - LOG_INFO("Weight type: %s", ggml_type_name(model_wtype)); - LOG_INFO("Conditioner weight type: %s", ggml_type_name(conditioner_wtype)); - LOG_INFO("Diffusion model weight type: %s", ggml_type_name(diffusion_model_wtype)); - LOG_INFO("VAE weight type: %s", ggml_type_name(vae_wtype)); + LOG_INFO("Weight type: %s", model_wtype != SD_TYPE_COUNT ? ggml_type_name(model_wtype) : "??"); + LOG_INFO("Conditioner weight type: %s", conditioner_wtype != SD_TYPE_COUNT ? ggml_type_name(conditioner_wtype) : "??"); + LOG_INFO("Diffusion model weight type: %s", diffusion_model_wtype != SD_TYPE_COUNT ? ggml_type_name(diffusion_model_wtype) : "??"); + LOG_INFO("VAE weight type: %s", vae_wtype != SD_TYPE_COUNT ? ggml_type_name(vae_wtype) : "??"); LOG_DEBUG("ggml tensor size = %d bytes", (int)sizeof(ggml_tensor)); @@ -294,15 +294,15 @@ class StableDiffusionGGML { } if (version == VERSION_SVD) { - clip_vision = std::make_shared(backend, conditioner_wtype); + clip_vision = std::make_shared(backend, model_loader.tensor_storages_types); clip_vision->alloc_params_buffer(); clip_vision->get_param_tensors(tensors); - diffusion_model = std::make_shared(backend, diffusion_model_wtype, version); + diffusion_model = std::make_shared(backend, model_loader.tensor_storages_types, version); diffusion_model->alloc_params_buffer(); diffusion_model->get_param_tensors(tensors); - first_stage_model = std::make_shared(backend, vae_wtype, vae_decode_only, true, version); + first_stage_model = std::make_shared(backend, model_loader.tensor_storages_types, "first_stage_model", vae_decode_only, true, version); LOG_DEBUG("vae_decode_only %d", vae_decode_only); first_stage_model->alloc_params_buffer(); first_stage_model->get_param_tensors(tensors, "first_stage_model"); @@ -327,19 +327,20 @@ class StableDiffusionGGML { if (diffusion_flash_attn) { LOG_WARN("flash attention in this diffusion model is currently unsupported!"); } - cond_stage_model = std::make_shared(clip_backend, conditioner_wtype); - diffusion_model = std::make_shared(backend, diffusion_model_wtype, version); + cond_stage_model = std::make_shared(clip_backend, model_loader.tensor_storages_types); + diffusion_model = std::make_shared(backend, model_loader.tensor_storages_types, version); } else if (sd_version_is_flux(version)) { - cond_stage_model = std::make_shared(clip_backend, conditioner_wtype); - diffusion_model = std::make_shared(backend, diffusion_model_wtype, version, diffusion_flash_attn); + cond_stage_model = std::make_shared(clip_backend, model_loader.tensor_storages_types); + diffusion_model = std::make_shared(backend, model_loader.tensor_storages_types, version, diffusion_flash_attn); } else { if (id_embeddings_path.find("v2") != std::string::npos) { cond_stage_model = std::make_shared(clip_backend, conditioner_wtype, embeddings_path, version, 
PM_VERSION_2); } else { - cond_stage_model = std::make_shared(clip_backend, conditioner_wtype, embeddings_path, version); + cond_stage_model = std::make_shared(clip_backend, model_loader.tensor_storages_types, embeddings_path, version); } - diffusion_model = std::make_shared(backend, diffusion_model_wtype, version, diffusion_flash_attn); + diffusion_model = std::make_shared(backend, model_loader.tensor_storages_types, version, diffusion_flash_attn); } + cond_stage_model->alloc_params_buffer(); cond_stage_model->get_param_tensors(tensors); @@ -353,11 +354,11 @@ class StableDiffusionGGML { } else { vae_backend = backend; } - first_stage_model = std::make_shared(vae_backend, vae_wtype, vae_decode_only, false, version); + first_stage_model = std::make_shared(vae_backend, model_loader.tensor_storages_types, "first_stage_model", vae_decode_only, false, version); first_stage_model->alloc_params_buffer(); first_stage_model->get_param_tensors(tensors, "first_stage_model"); } else { - tae_first_stage = std::make_shared(backend, vae_wtype, vae_decode_only); + tae_first_stage = std::make_shared(backend, vae_decode_only); } // first_stage_model->get_param_tensors(tensors, "first_stage_model."); @@ -369,17 +370,17 @@ class StableDiffusionGGML { } else { controlnet_backend = backend; } - control_net = std::make_shared(controlnet_backend, diffusion_model_wtype, version); + control_net = std::make_shared(controlnet_backend, version); } if (id_embeddings_path.find("v2") != std::string::npos) { pmid_model = std::make_shared(backend, model_wtype, version, PM_VERSION_2); LOG_INFO("using PhotoMaker Version 2"); } else { - pmid_model = std::make_shared(backend, model_wtype, version); + pmid_model = std::make_shared(backend,model_loader.tensor_storages_types, "pmid", version); } if (id_embeddings_path.size() > 0) { - pmid_lora = std::make_shared(backend, model_wtype, id_embeddings_path, ""); + pmid_lora = std::make_shared(backend, id_embeddings_path, ""); if (!pmid_lora->load_from_file(true)) { LOG_WARN("load photomaker lora tensors from %s failed", id_embeddings_path.c_str()); return false; @@ -633,7 +634,7 @@ class StableDiffusionGGML { LOG_WARN("can not find %s or %s for lora %s", st_file_path.c_str(), ckpt_file_path.c_str(), lora_name.c_str()); return; } - LoraModel lora(backend, model_wtype, file_path); + LoraModel lora(backend, file_path); if (!lora.load_from_file()) { LOG_WARN("load lora tensors from %s failed", file_path.c_str()); return; diff --git a/stable-diffusion.h b/stable-diffusion.h index 1fa328570..c67bc8a32 100644 --- a/stable-diffusion.h +++ b/stable-diffusion.h @@ -215,8 +215,7 @@ SD_API sd_image_t* img2vid(sd_ctx_t* sd_ctx, typedef struct upscaler_ctx_t upscaler_ctx_t; SD_API upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path, - int n_threads, - enum sd_type_t wtype); + int n_threads); SD_API void free_upscaler_ctx(upscaler_ctx_t* upscaler_ctx); SD_API sd_image_t upscale(upscaler_ctx_t* upscaler_ctx, sd_image_t input_image, uint32_t upscale_factor); diff --git a/t5.hpp b/t5.hpp index 79109e34b..2ad3b1105 100644 --- a/t5.hpp +++ b/t5.hpp @@ -357,7 +357,7 @@ class T5UniGramTokenizer { BuildTrie(&pieces); } - ~T5UniGramTokenizer(){}; + ~T5UniGramTokenizer() {}; std::string Normalize(const std::string& input) const { // Ref: https://github.com/huggingface/tokenizers/blob/1ff56c0c70b045f0cd82da1af9ac08cd4c7a6f9f/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py#L29 @@ -441,8 +441,9 @@ class T5LayerNorm : public UnaryBlock { int64_t hidden_size; float eps; - void 
init_params(struct ggml_context* ctx, ggml_type wtype) { - params["weight"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hidden_size); + void init_params(struct ggml_context* ctx, std::map& tensor_types, const std::string prefix = "") { + enum ggml_type wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32; + params["weight"] = ggml_new_tensor_1d(ctx, wtype, hidden_size); } public: @@ -717,14 +718,15 @@ struct T5Runner : public GGMLRunner { std::vector relative_position_bucket_vec; T5Runner(ggml_backend_t backend, - ggml_type wtype, + std::map& tensor_types, + const std::string prefix, int64_t num_layers = 24, int64_t model_dim = 4096, int64_t ff_dim = 10240, int64_t num_heads = 64, int64_t vocab_size = 32128) - : GGMLRunner(backend, wtype), model(num_layers, model_dim, ff_dim, num_heads, vocab_size) { - model.init(params_ctx, wtype); + : GGMLRunner(backend), model(num_layers, model_dim, ff_dim, num_heads, vocab_size) { + model.init(params_ctx, tensor_types, prefix); } std::string get_desc() { @@ -855,13 +857,14 @@ struct T5Embedder { T5Runner model; T5Embedder(ggml_backend_t backend, - ggml_type wtype, - int64_t num_layers = 24, - int64_t model_dim = 4096, - int64_t ff_dim = 10240, - int64_t num_heads = 64, - int64_t vocab_size = 32128) - : model(backend, wtype, num_layers, model_dim, ff_dim, num_heads, vocab_size) { + std::map& tensor_types = std::map(), + const std::string prefix = "", + int64_t num_layers = 24, + int64_t model_dim = 4096, + int64_t ff_dim = 10240, + int64_t num_heads = 64, + int64_t vocab_size = 32128) + : model(backend, tensor_types, prefix, num_layers, model_dim, ff_dim, num_heads, vocab_size) { } void get_param_tensors(std::map& tensors, const std::string prefix) { @@ -951,7 +954,7 @@ struct T5Embedder { // ggml_backend_t backend = ggml_backend_cuda_init(0); ggml_backend_t backend = ggml_backend_cpu_init(); ggml_type model_data_type = GGML_TYPE_F32; - std::shared_ptr t5 = std::shared_ptr(new T5Embedder(backend, model_data_type)); + std::shared_ptr t5 = std::shared_ptr(new T5Embedder(backend)); { LOG_INFO("loading from '%s'", file_path.c_str()); diff --git a/tae.hpp b/tae.hpp index 0e03b884e..b9cc33122 100644 --- a/tae.hpp +++ b/tae.hpp @@ -188,14 +188,14 @@ struct TinyAutoEncoder : public GGMLRunner { bool decode_only = false; TinyAutoEncoder(ggml_backend_t backend, - ggml_type wtype, bool decoder_only = true) : decode_only(decoder_only), taesd(decode_only), - GGMLRunner(backend, wtype) { - taesd.init(params_ctx, wtype); + GGMLRunner(backend) { + } + void init_params(std::map& tensor_types, const std::string prefix) { + taesd.init(params_ctx, tensor_types, prefix); } - std::string get_desc() { return "taesd"; } diff --git a/unet.hpp b/unet.hpp index 79f702c4d..b4aa82044 100644 --- a/unet.hpp +++ b/unet.hpp @@ -532,11 +532,13 @@ struct UNetModelRunner : public GGMLRunner { UnetModelBlock unet; UNetModelRunner(ggml_backend_t backend, - ggml_type wtype, SDVersion version = VERSION_SD1, bool flash_attn = false) - : GGMLRunner(backend, wtype), unet(version, flash_attn) { - unet.init(params_ctx, wtype); + : GGMLRunner(backend), unet(version, flash_attn) { + } + + void init_params(std::map& tensor_types, const std::string prefix) { + unet.init(params_ctx, tensor_types, prefix); } std::string get_desc() { diff --git a/upscaler.cpp b/upscaler.cpp index 096352993..d094d25e9 100644 --- a/upscaler.cpp +++ b/upscaler.cpp @@ -38,7 +38,7 @@ struct UpscalerGGML { backend = ggml_backend_cpu_init(); } 
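// A minimal sketch (not part of this series) of the lookup that the
// init_params overrides above keep repeating: take the type recorded for
// prefix + name in tensor_types, fall back to a default otherwise. The
// helper name get_tensor_type is illustrative, not a function in this repo;
// it assumes only <map>, <string> and ggml.h:
//
//   static inline enum ggml_type get_tensor_type(std::map<std::string, enum ggml_type>& tensor_types,
//                                                const std::string& name,
//                                                enum ggml_type fallback) {
//       auto it = tensor_types.find(name);  // per-tensor type recorded by the ModelLoader
//       return it != tensor_types.end() ? it->second : fallback;
//   }
//
//   // e.g. in Linear::init_params:
//   // enum ggml_type wtype = get_tensor_type(tensor_types, prefix + "weight", GGML_TYPE_F32);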
LOG_INFO("Upscaler weight type: %s", ggml_type_name(model_data_type)); - esrgan_upscaler = std::make_shared(backend, model_data_type); + esrgan_upscaler = std::make_shared(backend); if (!esrgan_upscaler->load_from_file(esrgan_path)) { return false; } @@ -96,8 +96,7 @@ struct upscaler_ctx_t { }; upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path_c_str, - int n_threads, - enum sd_type_t wtype) { + int n_threads) { upscaler_ctx_t* upscaler_ctx = (upscaler_ctx_t*)malloc(sizeof(upscaler_ctx_t)); if (upscaler_ctx == NULL) { return NULL; diff --git a/vae.hpp b/vae.hpp index 2985aadd3..4add881f6 100644 --- a/vae.hpp +++ b/vae.hpp @@ -163,8 +163,9 @@ class AE3DConv : public Conv2d { class VideoResnetBlock : public ResnetBlock { protected: - void init_params(struct ggml_context* ctx, ggml_type wtype) { - params["mix_factor"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); + void init_params(struct ggml_context* ctx, std::map& tensor_types, const std::string prefix = "") { + enum ggml_type wtype = (tensor_types.find(prefix + "mix_factor") != tensor_types.end()) ? tensor_types[prefix + "mix_factor"] : GGML_TYPE_F32; + params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1); } float get_alpha() { @@ -524,12 +525,13 @@ struct AutoEncoderKL : public GGMLRunner { AutoencodingEngine ae; AutoEncoderKL(ggml_backend_t backend, - ggml_type wtype, + std::map& tensor_types, + const std::string prefix, bool decode_only = false, bool use_video_decoder = false, SDVersion version = VERSION_SD1) - : decode_only(decode_only), ae(decode_only, use_video_decoder, version), GGMLRunner(backend, wtype) { - ae.init(params_ctx, wtype); + : decode_only(decode_only), ae(decode_only, use_video_decoder, version), GGMLRunner(backend) { + ae.init(params_ctx, tensor_types, prefix); } std::string get_desc() { From ee674a539ab84b65b1844f56d913310346db55dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 1 Nov 2024 17:40:55 +0100 Subject: [PATCH 02/12] Fix default args --- flux.hpp | 4 +++- mmdit.hpp | 5 +++-- t5.hpp | 4 +++- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/flux.hpp b/flux.hpp index 6f85cb4b8..8ab64bd2f 100644 --- a/flux.hpp +++ b/flux.hpp @@ -824,13 +824,15 @@ namespace Flux { }; struct FluxRunner : public GGMLRunner { + static std::map empty_tensor_types; + public: FluxParams flux_params; Flux flux; std::vector pe_vec; // for cache FluxRunner(ggml_backend_t backend, - std::map& tensor_types = std::map(), + std::map& tensor_types = empty_tensor_types, const std::string prefix = "", SDVersion version = VERSION_FLUX_DEV, bool flash_attn = false) diff --git a/mmdit.hpp b/mmdit.hpp index 253679354..4a126c7d6 100644 --- a/mmdit.hpp +++ b/mmdit.hpp @@ -872,12 +872,13 @@ struct MMDiT : public GGMLBlock { return x; } }; - struct MMDiTRunner : public GGMLRunner { MMDiT mmdit; + static std::map empty_tensor_types; + MMDiTRunner(ggml_backend_t backend, - std::map& tensor_types = std::map(), + std::map& tensor_types = empty_tensor_types, const std::string prefix = "", SDVersion version = VERSION_SD3_2B) : GGMLRunner(backend), mmdit(version) { diff --git a/t5.hpp b/t5.hpp index 2ad3b1105..9cf3279f8 100644 --- a/t5.hpp +++ b/t5.hpp @@ -856,8 +856,10 @@ struct T5Embedder { T5UniGramTokenizer tokenizer; T5Runner model; + static std::map empty_tensor_types; + T5Embedder(ggml_backend_t backend, - std::map& tensor_types = std::map(), + std::map& tensor_types = empty_tensor_types, const std::string prefix = "", int64_t num_layers = 24, int64_t model_dim = 4096, From 
b465f13208e692411d07d19e326608da63c8578c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?=
Date: Tue, 5 Nov 2024 22:53:11 +0100
Subject: [PATCH 03/12] refactor: fix flux

---
 flux.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/flux.hpp b/flux.hpp
index 8ab64bd2f..75411dfeb 100644
--- a/flux.hpp
+++ b/flux.hpp
@@ -35,7 +35,7 @@ namespace Flux {
         int64_t hidden_size;
         float eps;

-        void init_params(struct ggml_context* ctx, const std::string prefix, std::map& tensor_types, std::map& params) {
+        void init_params(struct ggml_context* ctx, std::map& tensor_types, const std::string prefix = "") {
             ggml_type wtype = GGML_TYPE_F32;  //(tensor_types.find(prefix + "scale") != tensor_types.end()) ? tensor_types[prefix + "scale"] : GGML_TYPE_F32;
             params["scale"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
         }

From cb46146203f0fd447e37ffa1a6e79cd9a9c163bd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?=
Date: Mon, 25 Nov 2024 13:10:02 +0100
Subject: [PATCH 04/12] Refactor photomaker v2 support

---
 pmid.hpp             | 5 ++---
 stable-diffusion.cpp | 6 +++---
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/pmid.hpp b/pmid.hpp
index 3d719a907..ea9f02eb6 100644
--- a/pmid.hpp
+++ b/pmid.hpp
@@ -631,7 +631,7 @@ struct PhotoMakerIDEncoder : public GGMLRunner {
         if (pm_version == PM_VERSION_1) {
             id_encoder.init(params_ctx, tensor_types, prefix);
         } else if (pm_version == PM_VERSION_2) {
-            id_encoder2.init(params_ctx, wtype);
+            id_encoder2.init(params_ctx, tensor_types, prefix);
         }
     }
@@ -780,11 +780,10 @@ struct PhotoMakerIDEmbed : public GGMLRunner {
     bool applied = false;
     PhotoMakerIDEmbed(ggml_backend_t backend,
-                      ggml_type wtype,
                       ModelLoader* ml,
                       const std::string& file_path = "",
                       const std::string& prefix = "")
-        : file_path(file_path), GGMLRunner(backend, wtype), model_loader(ml) {
+        : file_path(file_path), GGMLRunner(backend), model_loader(ml) {
         if (!model_loader->init_from_file(file_path, prefix)) {
             load_failed = true;
         }
diff --git a/stable-diffusion.cpp b/stable-diffusion.cpp
index 721e516f8..49d60810e 100644
--- a/stable-diffusion.cpp
+++ b/stable-diffusion.cpp
@@ -334,7 +334,7 @@ class StableDiffusionGGML {
             diffusion_model = std::make_shared(backend, model_loader.tensor_storages_types, version, diffusion_flash_attn);
         } else {
             if (id_embeddings_path.find("v2") != std::string::npos) {
-                cond_stage_model = std::make_shared(clip_backend, conditioner_wtype, embeddings_path, version, PM_VERSION_2);
+                cond_stage_model = std::make_shared(clip_backend, model_loader.tensor_storages_types, embeddings_path, version, PM_VERSION_2);
             } else {
                 cond_stage_model = std::make_shared(clip_backend, model_loader.tensor_storages_types, embeddings_path, version);
             }
@@ -374,10 +374,10 @@
         if (id_embeddings_path.find("v2") != std::string::npos) {
-            pmid_model = std::make_shared(backend, model_wtype, version, PM_VERSION_2);
+            pmid_model = std::make_shared(backend, model_loader.tensor_storages_types, "pmid", version, PM_VERSION_2);
             LOG_INFO("using PhotoMaker Version 2");
         } else {
-            pmid_model = std::make_shared(backend,model_loader.tensor_storages_types, "pmid", version);
+            pmid_model = std::make_shared(backend, model_loader.tensor_storages_types, "pmid", version);
         }
         if (id_embeddings_path.size() > 0) {
             pmid_lora = std::make_shared(backend, id_embeddings_path, "");

From 371d81f001f8438b83f6a9296e01b3b1ce15fa73 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?=
Date: Tue, 26 Nov 2024 21:50:34 +0100
Subject: [PATCH 05/12] 
unet: refactor the refactoring --- diffusion_model.hpp | 3 +-- unet.hpp | 5 ++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/diffusion_model.hpp b/diffusion_model.hpp index 8e5e2a6f5..710aecf68 100644 --- a/diffusion_model.hpp +++ b/diffusion_model.hpp @@ -34,8 +34,7 @@ struct UNetModel : public DiffusionModel { std::map& tensor_types, SDVersion version = VERSION_SD1, bool flash_attn = false) - : unet(backend, version, flash_attn) { - unet.init_params(tensor_types, "model.diffusion_model"); + : unet(backend, tensor_types, "model.diffusion_model", version, flash_attn) { } void alloc_params_buffer() { diff --git a/unet.hpp b/unet.hpp index b4aa82044..2a7adb3d2 100644 --- a/unet.hpp +++ b/unet.hpp @@ -532,12 +532,11 @@ struct UNetModelRunner : public GGMLRunner { UnetModelBlock unet; UNetModelRunner(ggml_backend_t backend, + std::map& tensor_types, + const std::string prefix, SDVersion version = VERSION_SD1, bool flash_attn = false) : GGMLRunner(backend), unet(version, flash_attn) { - } - - void init_params(std::map& tensor_types, const std::string prefix) { unet.init(params_ctx, tensor_types, prefix); } From 04ca926d4317aa55642d8f7275659daabce26c27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Tue, 26 Nov 2024 21:56:18 +0100 Subject: [PATCH 06/12] Refactor: fix controlnet and tae --- control.hpp | 8 +++----- stable-diffusion.cpp | 4 ++-- tae.hpp | 5 +++-- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/control.hpp b/control.hpp index 3b2c1e1b9..0cf081cea 100644 --- a/control.hpp +++ b/control.hpp @@ -317,12 +317,10 @@ struct ControlNet : public GGMLRunner { bool guided_hint_cached = false; ControlNet(ggml_backend_t backend, - SDVersion version = VERSION_SD1) + std::map& tensor_types, + SDVersion version = VERSION_SD1) : GGMLRunner(backend), control_net(version) { - } - - void init_params(std::map& tensor_types, const std::string prefix) { - control_net.init(params_ctx, tensor_types, prefix); + control_net.init(params_ctx, tensor_types, ""); } ~ControlNet() { diff --git a/stable-diffusion.cpp b/stable-diffusion.cpp index 49d60810e..a12cfed6e 100644 --- a/stable-diffusion.cpp +++ b/stable-diffusion.cpp @@ -358,7 +358,7 @@ class StableDiffusionGGML { first_stage_model->alloc_params_buffer(); first_stage_model->get_param_tensors(tensors, "first_stage_model"); } else { - tae_first_stage = std::make_shared(backend, vae_decode_only); + tae_first_stage = std::make_shared(backend, model_loader.tensor_storages_types, "decoder.layers", vae_decode_only); } // first_stage_model->get_param_tensors(tensors, "first_stage_model."); @@ -370,7 +370,7 @@ class StableDiffusionGGML { } else { controlnet_backend = backend; } - control_net = std::make_shared(controlnet_backend, version); + control_net = std::make_shared(controlnet_backend, model_loader.tensor_storages_types, version); } if (id_embeddings_path.find("v2") != std::string::npos) { diff --git a/tae.hpp b/tae.hpp index b9cc33122..ac061115c 100644 --- a/tae.hpp +++ b/tae.hpp @@ -188,14 +188,15 @@ struct TinyAutoEncoder : public GGMLRunner { bool decode_only = false; TinyAutoEncoder(ggml_backend_t backend, + std::map& tensor_types, + const std::string prefix, bool decoder_only = true) : decode_only(decoder_only), taesd(decode_only), GGMLRunner(backend) { - } - void init_params(std::map& tensor_types, const std::string prefix) { taesd.init(params_ctx, tensor_types, prefix); } + std::string get_desc() { return "taesd"; } From 38f5685fc5a6cd750396ca976e1266c4ae455cfa Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Tue, 26 Nov 2024 23:10:33 +0100 Subject: [PATCH 07/12] refactor: upscaler --- esrgan.hpp | 7 +++---- upscaler.cpp | 7 +++++-- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/esrgan.hpp b/esrgan.hpp index 1f39abc67..97931f809 100644 --- a/esrgan.hpp +++ b/esrgan.hpp @@ -142,12 +142,11 @@ struct ESRGAN : public GGMLRunner { int scale = 4; int tile_size = 128; // avoid cuda OOM for 4gb VRAM - ESRGAN(ggml_backend_t backend) + ESRGAN(ggml_backend_t backend,std::map& tensor_types) : GGMLRunner(backend) { + rrdb_net.init(params_ctx, tensor_types, ""); } - void init_params(std::map& tensor_types, const std::string prefix) { - rrdb_net.init(params_ctx, tensor_types, prefix); - } + std::string get_desc() { return "esrgan"; diff --git a/upscaler.cpp b/upscaler.cpp index d094d25e9..bada458b3 100644 --- a/upscaler.cpp +++ b/upscaler.cpp @@ -32,13 +32,16 @@ struct UpscalerGGML { LOG_DEBUG("Using SYCL backend"); backend = ggml_backend_sycl_init(0); #endif - + ModelLoader model_loader; + if (!model_loader.init_from_file(esrgan_path)) { + LOG_ERROR("init model loader from file failed: '%s'", esrgan_path.c_str()); + } if (!backend) { LOG_DEBUG("Using CPU backend"); backend = ggml_backend_cpu_init(); } LOG_INFO("Upscaler weight type: %s", ggml_type_name(model_data_type)); - esrgan_upscaler = std::make_shared(backend); + esrgan_upscaler = std::make_shared(backend, model_loader.tensor_storages_types); if (!esrgan_upscaler->load_from_file(esrgan_path)) { return false; } From 170663f7e8c6b6f63029574a2c0efb3dafb048ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Wed, 27 Nov 2024 00:32:43 +0100 Subject: [PATCH 08/12] Refactor: fix runtime type override --- model.cpp | 15 +++++++++++++++ model.h | 1 + stable-diffusion.cpp | 2 ++ 3 files changed, 18 insertions(+) diff --git a/model.cpp b/model.cpp index 9e3719406..3caac933c 100644 --- a/model.cpp +++ b/model.cpp @@ -1607,6 +1607,21 @@ ggml_type ModelLoader::get_vae_wtype() { return GGML_TYPE_COUNT; } +void ModelLoader::set_wtype_override(ggml_type wtype, std::string prefix) { + for (auto& pair : tensor_storages_types) { + if (prefix.size() < 1 || pair.first.substr(0, prefix.size()) == prefix) { + for (auto& tensor_storage : tensor_storages) { + if (tensor_storage.name == pair.first) { + if (tensor_should_be_converted(tensor_storage, wtype)) { + pair.second = wtype; + } + break; + } + } + } + } +} + std::string ModelLoader::load_merges() { std::string merges_utf8_str(reinterpret_cast(merges_utf8_c_str), sizeof(merges_utf8_c_str)); return merges_utf8_str; diff --git a/model.h b/model.h index 6100fa578..9b2f86df3 100644 --- a/model.h +++ b/model.h @@ -186,6 +186,7 @@ class ModelLoader { ggml_type get_conditioner_wtype(); ggml_type get_diffusion_model_wtype(); ggml_type get_vae_wtype(); + void set_wtype_override(ggml_type wtype, std::string prefix = ""); bool load_tensors(on_new_tensor_cb_t on_new_tensor_cb, ggml_backend_t backend); bool load_tensors(std::map& tensors, ggml_backend_t backend, diff --git a/stable-diffusion.cpp b/stable-diffusion.cpp index a12cfed6e..55992b70a 100644 --- a/stable-diffusion.cpp +++ b/stable-diffusion.cpp @@ -264,10 +264,12 @@ class StableDiffusionGGML { conditioner_wtype = wtype; diffusion_model_wtype = wtype; vae_wtype = wtype; + model_loader.set_wtype_override(wtype); } if (version == VERSION_SDXL) { vae_wtype = GGML_TYPE_F32; + model_loader.set_wtype_override(GGML_TYPE_F32, "vae."); } LOG_INFO("Weight type: %s", model_wtype != SD_TYPE_COUNT ? 
ggml_type_name(model_wtype) : "??"); From 8e7fbf884f17fc29b94065c5f577405a9e501cb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Wed, 27 Nov 2024 16:15:00 +0100 Subject: [PATCH 09/12] upscaler: use fp16 again --- upscaler.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/upscaler.cpp b/upscaler.cpp index bada458b3..1cf34c1a3 100644 --- a/upscaler.cpp +++ b/upscaler.cpp @@ -36,6 +36,7 @@ struct UpscalerGGML { if (!model_loader.init_from_file(esrgan_path)) { LOG_ERROR("init model loader from file failed: '%s'", esrgan_path.c_str()); } + model_loader.set_wtype_override(model_data_type); if (!backend) { LOG_DEBUG("Using CPU backend"); backend = ggml_backend_cpu_init(); From e7eabd3ac6b4fa49f5d870b2c0d9fdf220db0304 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 29 Nov 2024 01:08:11 +0100 Subject: [PATCH 10/12] Refactor: Flexible sd3 arch --- diffusion_model.hpp | 5 ++- mmdit.hpp | 83 ++++++++++++++++++++------------------------ model.cpp | 14 ++------ model.h | 6 ++-- stable-diffusion.cpp | 6 ++-- 5 files changed, 46 insertions(+), 68 deletions(-) diff --git a/diffusion_model.hpp b/diffusion_model.hpp index 710aecf68..34b326fa2 100644 --- a/diffusion_model.hpp +++ b/diffusion_model.hpp @@ -83,9 +83,8 @@ struct MMDiTModel : public DiffusionModel { MMDiTRunner mmdit; MMDiTModel(ggml_backend_t backend, - std::map& tensor_types, - SDVersion version = VERSION_SD3_2B) - : mmdit(backend, tensor_types, "model.diffusion_model", version) { + std::map& tensor_types) + : mmdit(backend, tensor_types, "model.diffusion_model") { } void alloc_params_buffer() { diff --git a/mmdit.hpp b/mmdit.hpp index 4a126c7d6..f92b428d6 100644 --- a/mmdit.hpp +++ b/mmdit.hpp @@ -637,7 +637,6 @@ struct FinalLayer : public GGMLBlock { struct MMDiT : public GGMLBlock { // Diffusion model with a Transformer backbone. 
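// The constructor below (patch 10) drops the SDVersion switch and infers the
// SD3 architecture from tensor names instead. A condensed sketch of that scan
// (not part of the patch), assuming keys of the form
// "model.diffusion_model.joint_blocks.<N>.x_block....":
//
//   int depth = 0, d_self = -1;
//   std::string qk_norm;
//   for (const auto& p : tensor_types) {
//       size_t jb = p.first.find("joint_blocks.");
//       if (jb == std::string::npos) continue;
//       int n = atoi(p.first.substr(jb + 13).c_str());  // block index after "joint_blocks."
//       if (n + 1 > depth) depth = n + 1;               // deepest block seen gives the layer count
//       if (p.first.find("attn.ln") != std::string::npos)
//           qk_norm = (p.first.find(".bias") != std::string::npos) ? "ln" : "rms";
//       if (p.first.find("attn2") != std::string::npos && n > d_self)
//           d_self = n;                                 // presence of attn2 marks MMDiT-x blocks
//   }
//   // hidden_size and context_embedder_out_dim then follow as 64 * depth.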
protected: - SDVersion version = VERSION_SD3_2B; int64_t input_size = -1; int64_t patch_size = 2; int64_t in_channels = 16; @@ -659,8 +658,7 @@ struct MMDiT : public GGMLBlock { } public: - MMDiT(SDVersion version = VERSION_SD3_2B) - : version(version) { + MMDiT(std::map& tensor_types) { // input_size is always None // learn_sigma is always False // register_length is alwalys 0 @@ -672,48 +670,44 @@ struct MMDiT : public GGMLBlock { // pos_embed_scaling_factor is not used // pos_embed_offset is not used // context_embedder_config is always {'target': 'torch.nn.Linear', 'params': {'in_features': 4096, 'out_features': 1536}} - if (version == VERSION_SD3_2B) { - input_size = -1; - patch_size = 2; - in_channels = 16; - depth = 24; - mlp_ratio = 4.0f; - adm_in_channels = 2048; - out_channels = 16; - pos_embed_max_size = 192; - num_patchs = 36864; // 192 * 192 - context_size = 4096; - context_embedder_out_dim = 1536; - } else if (version == VERSION_SD3_5_8B) { - input_size = -1; - patch_size = 2; - in_channels = 16; - depth = 38; - mlp_ratio = 4.0f; - adm_in_channels = 2048; - out_channels = 16; - pos_embed_max_size = 192; - num_patchs = 36864; // 192 * 192 - context_size = 4096; - context_embedder_out_dim = 2432; - qk_norm = "rms"; - } else if (version == VERSION_SD3_5_2B) { - input_size = -1; - patch_size = 2; - in_channels = 16; - depth = 24; - d_self = 12; - mlp_ratio = 4.0f; - adm_in_channels = 2048; - out_channels = 16; - pos_embed_max_size = 384; - num_patchs = 147456; - context_size = 4096; - context_embedder_out_dim = 1536; - qk_norm = "rms"; + + // read tensors from tensor_types + for (auto pair : tensor_types) { + std::string tensor_name = pair.first; + if (tensor_name.find("model.diffusion_model.") == std::string::npos) + continue; + size_t jb = tensor_name.find("joint_blocks."); + if (jb != std::string::npos) { + tensor_name = tensor_name.substr(jb); // remove prefix + int block_depth = atoi(tensor_name.substr(13, tensor_name.find(".", 13)).c_str()); + if (block_depth + 1 > depth) { + depth = block_depth + 1; + } + if (tensor_name.find("attn.ln") != std::string::npos) { + if (tensor_name.find(".bias") != std::string::npos) { + qk_norm = "ln"; + } else { + qk_norm = "rms"; + } + } + if (tensor_name.find("attn2") != std::string::npos) { + if (block_depth > d_self) { + d_self = block_depth; + } + } + } } + + if (d_self >= 0) { + pos_embed_max_size *= 2; + num_patchs *= 4; + } + + LOG_INFO("MMDiT layers: %d (including %d MMDiT-x layers)", depth, d_self + 1); + int64_t default_out_channels = in_channels; hidden_size = 64 * depth; + context_embedder_out_dim = 64 * depth; int64_t num_heads = depth; blocks["x_embedder"] = std::shared_ptr(new PatchEmbed(input_size, patch_size, in_channels, hidden_size, true)); @@ -879,9 +873,8 @@ struct MMDiTRunner : public GGMLRunner { MMDiTRunner(ggml_backend_t backend, std::map& tensor_types = empty_tensor_types, - const std::string prefix = "", - SDVersion version = VERSION_SD3_2B) - : GGMLRunner(backend), mmdit(version) { + const std::string prefix = "") + : GGMLRunner(backend), mmdit(tensor_types) { mmdit.init(params_ctx, tensor_types, prefix); } diff --git a/model.cpp b/model.cpp index 3caac933c..76ac60761 100644 --- a/model.cpp +++ b/model.cpp @@ -1462,7 +1462,6 @@ SDVersion ModelLoader::get_sd_version() { bool is_flux = false; bool is_schnell = true; bool is_lite = true; - bool is_sd3 = false; for (auto& tensor_storage : tensor_storages) { if (tensor_storage.name.find("model.diffusion_model.guidance_in.in_layer.weight") != std::string::npos) { 
From 4080c29140198b1014b3c0fe345f8355e26dd8a1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?=
Date: Fri, 29 Nov 2024 01:42:26 +0100
Subject: [PATCH 11/12] Refactor: Flexible Flux arch

---
 diffusion_model.hpp  |  3 +--
 flux.hpp             | 41 ++++++++++++++++++++++++++++++++++-------
 model.cpp            | 22 ++--------------------
 model.h              |  6 ++----
 stable-diffusion.cpp | 15 ++++++++-------
 5 files changed, 47 insertions(+), 40 deletions(-)

diff --git a/diffusion_model.hpp b/diffusion_model.hpp
index 34b326fa2..4bc36e3d0 100644
--- a/diffusion_model.hpp
+++ b/diffusion_model.hpp
@@ -133,9 +133,8 @@ struct FluxModel : public DiffusionModel {

     FluxModel(ggml_backend_t backend,
               std::map<std::string, enum ggml_type>& tensor_types,
-              SDVersion version = VERSION_FLUX_DEV,
               bool flash_attn = false)
-        : flux(backend, tensor_types, "model.diffusion_model", version, flash_attn) {
+        : flux(backend, tensor_types, "model.diffusion_model", flash_attn) {
     }

     void alloc_params_buffer() {
diff --git a/flux.hpp b/flux.hpp
index 75411dfeb..fdd00ebcb 100644
--- a/flux.hpp
+++ b/flux.hpp
@@ -834,16 +834,43 @@ namespace Flux {
         FluxRunner(ggml_backend_t backend,
                    std::map<std::string, enum ggml_type>& tensor_types = empty_tensor_types,
                    const std::string prefix = "",
-                   SDVersion version = VERSION_FLUX_DEV,
-                   bool flash_attn = false)
+                   bool flash_attn = false)
             : GGMLRunner(backend) {
-            flux_params.flash_attn = flash_attn;
-            if (version == VERSION_FLUX_SCHNELL) {
-                flux_params.guidance_embed = false;
+            flux_params.flash_attn = flash_attn;
+            flux_params.guidance_embed = false;
+            flux_params.depth = 0;
+            flux_params.depth_single_blocks = 0;
+            for (auto pair : tensor_types) {
+                std::string tensor_name = pair.first;
+                if (tensor_name.find("model.diffusion_model.") == std::string::npos)
+                    continue;
+                if (tensor_name.find("guidance_in.in_layer.weight") != std::string::npos) {
+                    // not schnell
+                    flux_params.guidance_embed = true;
+                }
+                size_t db = tensor_name.find("double_blocks.");
+                if (db != std::string::npos) {
+                    tensor_name = tensor_name.substr(db); // remove prefix
+                    int block_depth = atoi(tensor_name.substr(14, tensor_name.find(".", 14)).c_str());
+                    if (block_depth + 1 > flux_params.depth) {
+                        flux_params.depth = block_depth + 1;
+                    }
+                }
+                size_t sb = tensor_name.find("single_blocks.");
+                if (sb != std::string::npos) {
+                    tensor_name = tensor_name.substr(sb); // remove prefix
+                    int block_depth = atoi(tensor_name.substr(14, tensor_name.find(".", 14)).c_str());
+                    if (block_depth + 1 > flux_params.depth_single_blocks) {
+                        flux_params.depth_single_blocks = block_depth + 1;
+                    }
+                }
             }
-            if (version == VERSION_FLUX_LITE) {
-                flux_params.depth = 8;
+
+            LOG_INFO("Flux blocks: %d double, %d single", flux_params.depth, flux_params.depth_single_blocks);
+            if (!flux_params.guidance_embed) {
+                LOG_INFO("Flux guidance is disabled (Schnell mode)");
             }
+            flux = Flux(flux_params);
             flux.init(params_ctx, tensor_types, prefix);
         }
diff --git a/model.cpp b/model.cpp
index 76ac60761..c90918ad2 100644
--- a/model.cpp
+++ b/model.cpp
@@ -1459,18 +1459,9 @@ bool ModelLoader::init_from_ckpt_file(const std::string& file_path, const std::s

 SDVersion ModelLoader::get_sd_version() {
     TensorStorage token_embedding_weight;
-    bool is_flux = false;
-    bool is_schnell = true;
-    bool is_lite = true;
     for (auto& tensor_storage : tensor_storages) {
-        if (tensor_storage.name.find("model.diffusion_model.guidance_in.in_layer.weight") != std::string::npos) {
-            is_schnell = false;
-        }
         if (tensor_storage.name.find("model.diffusion_model.double_blocks.") != std::string::npos) {
-            is_flux = true;
-        }
-        if (tensor_storage.name.find("model.diffusion_model.double_blocks.8") != std::string::npos) {
-            is_lite = false;
+            return VERSION_FLUX;
         }
         if (tensor_storage.name.find("model.diffusion_model.joint_blocks.") != std::string::npos) {
             return VERSION_SD3;
@@ -1495,16 +1486,7 @@ SDVersion ModelLoader::get_sd_version() {
             // break;
         }
     }
-    if (is_flux) {
-        if (is_schnell) {
-            GGML_ASSERT(!is_lite);
-            return VERSION_FLUX_SCHNELL;
-        } else if (is_lite) {
-            return VERSION_FLUX_LITE;
-        } else {
-            return VERSION_FLUX_DEV;
-        }
-    }
+
     if (token_embedding_weight.ne[0] == 768) {
         return VERSION_SD1;
     } else if (token_embedding_weight.ne[0] == 1024) {
diff --git a/model.h b/model.h
index ba65d7ead..29d46c192 100644
--- a/model.h
+++ b/model.h
@@ -23,14 +23,12 @@ enum SDVersion {
     VERSION_SDXL,
     VERSION_SVD,
     VERSION_SD3,
-    VERSION_FLUX_DEV,
-    VERSION_FLUX_SCHNELL,
-    VERSION_FLUX_LITE,
+    VERSION_FLUX,
     VERSION_COUNT,
 };

 static inline bool sd_version_is_flux(SDVersion version) {
-    if (version == VERSION_FLUX_DEV || version == VERSION_FLUX_SCHNELL || version == VERSION_FLUX_LITE) {
+    if (version == VERSION_FLUX) {
         return true;
     }
     return false;
diff --git a/stable-diffusion.cpp b/stable-diffusion.cpp
index 3b065aa39..5abc29507 100644
--- a/stable-diffusion.cpp
+++ b/stable-diffusion.cpp
@@ -30,9 +30,7 @@ const char* model_version_to_str[] = {
     "SDXL",
     "SVD",
     "SD3.x",
-    "Flux Dev",
-    "Flux Schnell",
-    "Flux Lite 8B"};
+    "Flux"};

 const char* sampling_methods_str[] = {
     "Euler A",
@@ -331,7 +329,7 @@ class StableDiffusionGGML {
             diffusion_model = std::make_shared<MMDiTModel>(backend, model_loader.tensor_storages_types);
         } else if (sd_version_is_flux(version)) {
             cond_stage_model = std::make_shared<FluxCLIPEmbedder>(clip_backend, model_loader.tensor_storages_types);
-            diffusion_model = std::make_shared<FluxModel>(backend, model_loader.tensor_storages_types, version, diffusion_flash_attn);
+            diffusion_model = std::make_shared<FluxModel>(backend, model_loader.tensor_storages_types, diffusion_flash_attn);
         } else {
             if (id_embeddings_path.find("v2") != std::string::npos) {
                 cond_stage_model = std::make_shared<FrozenCLIPEmbedderWithCustomWords>(clip_backend, model_loader.tensor_storages_types, embeddings_path, version, PM_VERSION_2);
@@ -533,9 +531,12 @@ class StableDiffusionGGML {
             denoiser = std::make_shared<DiscreteFlowDenoiser>();
         } else if (sd_version_is_flux(version)) {
             LOG_INFO("running in Flux FLOW mode");
-            float shift = 1.15f;
-            if (version == VERSION_FLUX_SCHNELL) {
-                shift = 1.0f;  // TODO: validate
+            float shift = 1.0f;  // TODO: validate
+            for (auto pair : model_loader.tensor_storages_types) {
+                if (pair.first.find("model.diffusion_model.guidance_in.in_layer.weight") != std::string::npos) {
+                    shift = 1.15f;
+                    break;
+                }
             }
             denoiser = std::make_shared<FluxFlowDenoiser>(shift);
         } else if (is_using_v_parameterization) {
From 5d501cd52b538a2809cb28cdbc44c30348d7619e Mon Sep 17 00:00:00 2001
From: leejet
Date: Sat, 30 Nov 2024 14:17:38 +0800
Subject: [PATCH 12/12] format code

---
 common.hpp          | 10 +++++-----
 control.hpp         |  2 +-
 diffusion_model.hpp |  2 +-
 esrgan.hpp          |  3 +--
 ggml_extend.hpp     |  4 ++--
 lora.hpp            |  2 +-
 mmdit.hpp           |  2 +-
 t5.hpp              |  2 +-
 tae.hpp             |  2 +-
 9 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/common.hpp b/common.hpp
index da8353b36..337b4a0c4 100644
--- a/common.hpp
+++ b/common.hpp
@@ -183,10 +183,10 @@ class GEGLU : public GGMLBlock {
     int64_t dim_out;

     void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
-        enum ggml_type wtype = (tensor_types.find(prefix + "proj.weight") != tensor_types.end()) ? tensor_types[prefix + "proj.weight"] : GGML_TYPE_F32;
-        enum ggml_type bias_wtype = GGML_TYPE_F32;//(tensor_types.find(prefix + "proj.bias") != tensor_types.end()) ? tensor_types[prefix + "proj.bias"] : GGML_TYPE_F32;
-        params["proj.weight"] = ggml_new_tensor_2d(ctx, wtype, dim_in, dim_out * 2);
-        params["proj.bias"] = ggml_new_tensor_1d(ctx, bias_wtype, dim_out * 2);
+        enum ggml_type wtype      = (tensor_types.find(prefix + "proj.weight") != tensor_types.end()) ? tensor_types[prefix + "proj.weight"] : GGML_TYPE_F32;
+        enum ggml_type bias_wtype = GGML_TYPE_F32;  //(tensor_types.find(prefix + "proj.bias") != tensor_types.end()) ? tensor_types[prefix + "proj.bias"] : GGML_TYPE_F32;
+        params["proj.weight"]     = ggml_new_tensor_2d(ctx, wtype, dim_in, dim_out * 2);
+        params["proj.bias"]       = ggml_new_tensor_1d(ctx, bias_wtype, dim_out * 2);
     }

 public:
@@ -442,7 +442,7 @@ class AlphaBlender : public GGMLBlock {
 protected:
     void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
         // Get the type of the "mix_factor" tensor from the input tensors map with the specified prefix
-        enum ggml_type wtype = GGML_TYPE_F32;//(tensor_types.ypes.find(prefix + "mix_factor") != tensor_types.end()) ? tensor_types[prefix + "mix_factor"] : GGML_TYPE_F32;
+        enum ggml_type wtype = GGML_TYPE_F32;  //(tensor_types.ypes.find(prefix + "mix_factor") != tensor_types.end()) ? tensor_types[prefix + "mix_factor"] : GGML_TYPE_F32;
         params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
     }

diff --git a/control.hpp b/control.hpp
index 0cf081cea..ed36db280 100644
--- a/control.hpp
+++ b/control.hpp
@@ -318,7 +318,7 @@ struct ControlNet : public GGMLRunner {

     ControlNet(ggml_backend_t backend,
                std::map<std::string, enum ggml_type>& tensor_types,
-               SDVersion version = VERSION_SD1)
+               SDVersion version  = VERSION_SD1)
         : GGMLRunner(backend), control_net(version) {
         control_net.init(params_ctx, tensor_types, "");
     }
diff --git a/diffusion_model.hpp b/diffusion_model.hpp
index 4bc36e3d0..cbc0cd4c1 100644
--- a/diffusion_model.hpp
+++ b/diffusion_model.hpp
@@ -133,7 +133,7 @@ struct FluxModel : public DiffusionModel {

     FluxModel(ggml_backend_t backend,
               std::map<std::string, enum ggml_type>& tensor_types,
-              bool flash_attn = false)
+              bool flash_attn     = false)
         : flux(backend, tensor_types, "model.diffusion_model", flash_attn) {
     }
diff --git a/esrgan.hpp b/esrgan.hpp
index 97931f809..989d15fee 100644
--- a/esrgan.hpp
+++ b/esrgan.hpp
@@ -142,12 +142,11 @@ struct ESRGAN : public GGMLRunner {
     int scale = 4;
     int tile_size = 128;  // avoid cuda OOM for 4gb VRAM

-    ESRGAN(ggml_backend_t backend,std::map<std::string, enum ggml_type>& tensor_types)
+    ESRGAN(ggml_backend_t backend, std::map<std::string, enum ggml_type>& tensor_types)
         : GGMLRunner(backend) {
         rrdb_net.init(params_ctx, tensor_types, "");
     }
-
     std::string get_desc() {
         return "esrgan";
     }
diff --git a/ggml_extend.hpp b/ggml_extend.hpp
index 8c84ba3f3..8afcd367c 100644
--- a/ggml_extend.hpp
+++ b/ggml_extend.hpp
@@ -675,13 +675,13 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_attention(struct ggml_context* ctx
#if defined(SD_USE_FLASH_ATTENTION) && !defined(SD_USE_CUBLAS) && !defined(SD_USE_METAL) && !defined(SD_USE_VULKAN) && !defined(SD_USE_SYCL)
     struct ggml_tensor* kqv = ggml_flash_attn(ctx, q, k, v, false);  // [N * n_head, n_token, d_head]
 #else
-    float d_head = (float)q->ne[0];
+    float d_head            = (float)q->ne[0];
     struct ggml_tensor* kq = ggml_mul_mat(ctx, k, q);  // [N * n_head, n_token, n_k]
     kq = ggml_scale_inplace(ctx, kq, 1.0f / sqrt(d_head));
     if (mask) {
         kq = ggml_diag_mask_inf_inplace(ctx, kq, 0);
     }
-    kq = ggml_soft_max_inplace(ctx, kq);
+    kq                      = ggml_soft_max_inplace(ctx, kq);
     struct ggml_tensor* kqv = ggml_mul_mat(ctx, v, kq);  // [N * n_head, n_token, d_head]
 #endif
     return kqv;
diff --git a/lora.hpp b/lora.hpp
index 43daba48f..5f458faee 100644
--- a/lora.hpp
+++ b/lora.hpp
@@ -17,7 +17,7 @@ struct LoraModel : public GGMLRunner {

     LoraModel(ggml_backend_t backend,
               const std::string& file_path = "",
-              const std::string prefix = "")
+              const std::string prefix     = "")
         : file_path(file_path), GGMLRunner(backend) {
         if (!model_loader.init_from_file(file_path, prefix)) {
             load_failed = true;
diff --git a/mmdit.hpp b/mmdit.hpp
index f92b428d6..dee7b1c49 100644
--- a/mmdit.hpp
+++ b/mmdit.hpp
@@ -678,7 +678,7 @@ struct MMDiT : public GGMLBlock {
                 continue;
             size_t jb = tensor_name.find("joint_blocks.");
             if (jb != std::string::npos) {
-                tensor_name = tensor_name.substr(jb); // remove prefix
+                tensor_name     = tensor_name.substr(jb);  // remove prefix
                 int block_depth = atoi(tensor_name.substr(13, tensor_name.find(".", 13)).c_str());
                 if (block_depth + 1 > depth) {
                     depth = block_depth + 1;
diff --git a/t5.hpp b/t5.hpp
index 9cf3279f8..2a53e2743 100644
--- a/t5.hpp
+++ b/t5.hpp
@@ -357,7 +357,7 @@ class T5UniGramTokenizer {
         BuildTrie(&pieces);
     }

-    ~T5UniGramTokenizer() {};
+    ~T5UniGramTokenizer(){};

     std::string Normalize(const std::string& input) const {
         // Ref: https://github.com/huggingface/tokenizers/blob/1ff56c0c70b045f0cd82da1af9ac08cd4c7a6f9f/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py#L29
diff --git a/tae.hpp b/tae.hpp
index ac061115c..fee5e8328 100644
--- a/tae.hpp
+++ b/tae.hpp
@@ -196,7 +196,7 @@ struct TinyAutoEncoder : public GGMLRunner {
           GGMLRunner(backend) {
         taesd.init(params_ctx, tensor_types, prefix);
     }
-
+
     std::string get_desc() {
         return "taesd";
     }