
Commit 5d501cd

format code
1 parent 4080c29 commit 5d501cd


9 files changed: +14 additions, -15 deletions

common.hpp

Lines changed: 5 additions & 5 deletions
@@ -183,10 +183,10 @@ class GEGLU : public GGMLBlock {
     int64_t dim_out;
 
     void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
-        enum ggml_type wtype = (tensor_types.find(prefix + "proj.weight") != tensor_types.end()) ? tensor_types[prefix + "proj.weight"] : GGML_TYPE_F32;
-        enum ggml_type bias_wtype = GGML_TYPE_F32;//(tensor_types.find(prefix + "proj.bias") != tensor_types.end()) ? tensor_types[prefix + "proj.bias"] : GGML_TYPE_F32;
-        params["proj.weight"] = ggml_new_tensor_2d(ctx, wtype, dim_in, dim_out * 2);
-        params["proj.bias"] = ggml_new_tensor_1d(ctx, bias_wtype, dim_out * 2);
+        enum ggml_type wtype = (tensor_types.find(prefix + "proj.weight") != tensor_types.end()) ? tensor_types[prefix + "proj.weight"] : GGML_TYPE_F32;
+        enum ggml_type bias_wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "proj.bias") != tensor_types.end()) ? tensor_types[prefix + "proj.bias"] : GGML_TYPE_F32;
+        params["proj.weight"] = ggml_new_tensor_2d(ctx, wtype, dim_in, dim_out * 2);
+        params["proj.bias"] = ggml_new_tensor_1d(ctx, bias_wtype, dim_out * 2);
     }
 
 public:
@@ -442,7 +442,7 @@ class AlphaBlender : public GGMLBlock {
 protected:
     void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
         // Get the type of the "mix_factor" tensor from the input tensors map with the specified prefix
-        enum ggml_type wtype = GGML_TYPE_F32;//(tensor_types.ypes.find(prefix + "mix_factor") != tensor_types.end()) ? tensor_types[prefix + "mix_factor"] : GGML_TYPE_F32;
+        enum ggml_type wtype = GGML_TYPE_F32; //(tensor_types.ypes.find(prefix + "mix_factor") != tensor_types.end()) ? tensor_types[prefix + "mix_factor"] : GGML_TYPE_F32;
         params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
     }
 
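Note: both hunks reuse the same lookup-with-fallback pattern: a tensor's ggml type is read from the tensor_types map when the key exists, and defaults to GGML_TYPE_F32 otherwise. A minimal sketch of that pattern as a standalone helper (hypothetical, not part of this commit or the codebase; the ggml.h include path is assumed):

#include <map>
#include <string>
#include "ggml.h"  // assumed include path for enum ggml_type / GGML_TYPE_F32

// Hypothetical helper: return the recorded type for prefix + name,
// or fall back to F32 when the tensor is not present in the map.
static enum ggml_type type_or_f32(const std::map<std::string, enum ggml_type>& tensor_types,
                                  const std::string& prefix,
                                  const std::string& name) {
    auto it = tensor_types.find(prefix + name);
    return (it != tensor_types.end()) ? it->second : GGML_TYPE_F32;
}

With such a helper the first hunk would reduce to wtype = type_or_f32(tensor_types, prefix, "proj.weight").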

control.hpp

Lines changed: 1 addition & 1 deletion
@@ -318,7 +318,7 @@ struct ControlNet : public GGMLRunner {
 
     ControlNet(ggml_backend_t backend,
                std::map<std::string, enum ggml_type>& tensor_types,
-               SDVersion version = VERSION_SD1)
+               SDVersion version = VERSION_SD1)
         : GGMLRunner(backend), control_net(version) {
         control_net.init(params_ctx, tensor_types, "");
     }

diffusion_model.hpp

Lines changed: 1 addition & 1 deletion
@@ -133,7 +133,7 @@ struct FluxModel : public DiffusionModel {
 
     FluxModel(ggml_backend_t backend,
               std::map<std::string, enum ggml_type>& tensor_types,
-              bool flash_attn = false)
+              bool flash_attn = false)
         : flux(backend, tensor_types, "model.diffusion_model", flash_attn) {
     }
 
esrgan.hpp

Lines changed: 1 addition & 2 deletions
@@ -142,12 +142,11 @@ struct ESRGAN : public GGMLRunner {
     int scale = 4;
     int tile_size = 128; // avoid cuda OOM for 4gb VRAM
 
-    ESRGAN(ggml_backend_t backend,std::map<std::string, enum ggml_type>& tensor_types)
+    ESRGAN(ggml_backend_t backend, std::map<std::string, enum ggml_type>& tensor_types)
         : GGMLRunner(backend) {
         rrdb_net.init(params_ctx, tensor_types, "");
     }
 
-
     std::string get_desc() {
         return "esrgan";
     }

ggml_extend.hpp

Lines changed: 2 additions & 2 deletions
@@ -675,13 +675,13 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_attention(struct ggml_context* ctx
 #if defined(SD_USE_FLASH_ATTENTION) && !defined(SD_USE_CUBLAS) && !defined(SD_USE_METAL) && !defined(SD_USE_VULKAN) && !defined(SD_USE_SYCL)
     struct ggml_tensor* kqv = ggml_flash_attn(ctx, q, k, v, false); // [N * n_head, n_token, d_head]
 #else
-    float d_head = (float)q->ne[0];
+    float d_head = (float)q->ne[0];
     struct ggml_tensor* kq = ggml_mul_mat(ctx, k, q); // [N * n_head, n_token, n_k]
     kq = ggml_scale_inplace(ctx, kq, 1.0f / sqrt(d_head));
     if (mask) {
         kq = ggml_diag_mask_inf_inplace(ctx, kq, 0);
     }
-    kq = ggml_soft_max_inplace(ctx, kq);
+    kq = ggml_soft_max_inplace(ctx, kq);
     struct ggml_tensor* kqv = ggml_mul_mat(ctx, v, kq); // [N * n_head, n_token, d_head]
 #endif
     return kqv;
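Note: the #else branch above is plain scaled dot-product attention, out = softmax(Q Kᵀ / sqrt(d_head)) V, with an optional causal mask. A self-contained reference sketch of the same computation in ordinary C++ (illustrative only; single head, no masking; not code from the repository):

#include <algorithm>
#include <cmath>
#include <vector>

// q: [n_token][d_head], k: [n_k][d_head], v: [n_k][d_head] -> out: [n_token][d_head]
std::vector<std::vector<float>> attention_ref(const std::vector<std::vector<float>>& q,
                                              const std::vector<std::vector<float>>& k,
                                              const std::vector<std::vector<float>>& v) {
    const float d_head = (float)q[0].size();
    std::vector<std::vector<float>> out(q.size(), std::vector<float>(v[0].size(), 0.0f));
    for (size_t i = 0; i < q.size(); i++) {
        // scores[j] = dot(q_i, k_j) / sqrt(d_head)
        std::vector<float> scores(k.size());
        float max_s = -INFINITY;
        for (size_t j = 0; j < k.size(); j++) {
            float s = 0.0f;
            for (size_t c = 0; c < q[i].size(); c++) {
                s += q[i][c] * k[j][c];
            }
            scores[j] = s / std::sqrt(d_head);
            max_s = std::max(max_s, scores[j]);
        }
        // softmax over j (shifted by max_s for numerical stability)
        float sum = 0.0f;
        for (float& s : scores) {
            s = std::exp(s - max_s);
            sum += s;
        }
        // out_i = sum_j softmax(scores)_j * v_j
        for (size_t j = 0; j < k.size(); j++) {
            for (size_t c = 0; c < v[j].size(); c++) {
                out[i][c] += (scores[j] / sum) * v[j][c];
            }
        }
    }
    return out;
}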

lora.hpp

Lines changed: 1 addition & 1 deletion
@@ -17,7 +17,7 @@ struct LoraModel : public GGMLRunner {
 
     LoraModel(ggml_backend_t backend,
               const std::string& file_path = "",
-              const std::string prefix = "")
+              const std::string prefix = "")
         : file_path(file_path), GGMLRunner(backend) {
         if (!model_loader.init_from_file(file_path, prefix)) {
             load_failed = true;

mmdit.hpp

Lines changed: 1 addition & 1 deletion
@@ -678,7 +678,7 @@ struct MMDiT : public GGMLBlock {
                 continue;
             size_t jb = tensor_name.find("joint_blocks.");
             if (jb != std::string::npos) {
-                tensor_name = tensor_name.substr(jb); // remove prefix
+                tensor_name = tensor_name.substr(jb); // remove prefix
                 int block_depth = atoi(tensor_name.substr(13, tensor_name.find(".", 13)).c_str());
                 if (block_depth + 1 > depth) {
                     depth = block_depth + 1;
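Note: this hunk sits in MMDiT's depth-detection loop: everything before "joint_blocks." is stripped, then the block index after the 13-character prefix is parsed with atoi (which stops at the first non-digit, so the imprecise substr length is harmless). A standalone sketch of that parsing (illustrative only; the example tensor name is assumed, not taken from the commit):

#include <cstdio>
#include <cstdlib>
#include <string>

int main() {
    std::string tensor_name = "model.diffusion_model.joint_blocks.3.attn.qkv.weight";  // assumed example
    int depth = 0;

    size_t jb = tensor_name.find("joint_blocks.");
    if (jb != std::string::npos) {
        tensor_name = tensor_name.substr(jb);  // "joint_blocks.3.attn.qkv.weight"
        // "joint_blocks." is 13 characters; atoi() parses the digits that follow
        // and ignores everything from the first non-digit onward.
        int block_depth = atoi(tensor_name.substr(13, tensor_name.find(".", 13)).c_str());
        if (block_depth + 1 > depth) {
            depth = block_depth + 1;
        }
    }
    printf("depth = %d\n", depth);  // prints: depth = 4
    return 0;
}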

t5.hpp

Lines changed: 1 addition & 1 deletion
@@ -357,7 +357,7 @@ class T5UniGramTokenizer {
 
         BuildTrie(&pieces);
     }
-    ~T5UniGramTokenizer() {};
+    ~T5UniGramTokenizer(){};
 
     std::string Normalize(const std::string& input) const {
         // Ref: https://github.com/huggingface/tokenizers/blob/1ff56c0c70b045f0cd82da1af9ac08cd4c7a6f9f/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py#L29

tae.hpp

Lines changed: 1 addition & 1 deletion
@@ -196,7 +196,7 @@ struct TinyAutoEncoder : public GGMLRunner {
           GGMLRunner(backend) {
         taesd.init(params_ctx, tensor_types, prefix);
     }
-
+
     std::string get_desc() {
         return "taesd";
     }
