@@ -183,10 +183,10 @@ class GEGLU : public GGMLBlock {
     int64_t dim_out;

     void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
-        enum ggml_type wtype = (tensor_types.find(prefix + "proj.weight") != tensor_types.end()) ? tensor_types[prefix + "proj.weight"] : GGML_TYPE_F32;
-        enum ggml_type bias_wtype = GGML_TYPE_F32;// (tensor_types.find(prefix + "proj.bias") != tensor_types.end()) ? tensor_types[prefix + "proj.bias"] : GGML_TYPE_F32;
-        params["proj.weight"] = ggml_new_tensor_2d(ctx, wtype, dim_in, dim_out * 2);
-        params["proj.bias"] = ggml_new_tensor_1d(ctx, bias_wtype, dim_out * 2);
+        enum ggml_type wtype      = (tensor_types.find(prefix + "proj.weight") != tensor_types.end()) ? tensor_types[prefix + "proj.weight"] : GGML_TYPE_F32;
+        enum ggml_type bias_wtype = GGML_TYPE_F32;  // (tensor_types.find(prefix + "proj.bias") != tensor_types.end()) ? tensor_types[prefix + "proj.bias"] : GGML_TYPE_F32;
+        params["proj.weight"]     = ggml_new_tensor_2d(ctx, wtype, dim_in, dim_out * 2);
+        params["proj.bias"]       = ggml_new_tensor_1d(ctx, bias_wtype, dim_out * 2);
     }

 public:
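
An aside on the `dim_out * 2` in the hunk above: GEGLU projects the input to twice the output width and then splits the projection into a value half and a gate half, which is why `proj.weight` and `proj.bias` are allocated at `dim_out * 2`. A minimal sketch of that split on one flat row (plain C++, independent of ggml; `geglu_forward` and the erf-based GELU are illustrative, and which half gates which follows the checkpoint's convention, not necessarily this repo's):

```cpp
#include <cmath>
#include <vector>

// GEGLU forward for one row: proj_out holds 2 * dim_out values. Here the
// first half is the value path and the second half gates it through GELU.
std::vector<float> geglu_forward(const std::vector<float>& proj_out, int dim_out) {
    std::vector<float> y(dim_out);
    for (int i = 0; i < dim_out; i++) {
        float x    = proj_out[i];             // value half
        float gate = proj_out[dim_out + i];   // gate half
        float gelu = 0.5f * gate * (1.0f + std::erf(gate / std::sqrt(2.0f)));
        y[i]       = x * gelu;
    }
    return y;
}
```
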
@@ -442,7 +442,7 @@ class AlphaBlender : public GGMLBlock {
 protected:
     void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
         // Get the type of the "mix_factor" tensor from the input tensors map with the specified prefix
-        enum ggml_type wtype = GGML_TYPE_F32;// (tensor_types.ypes.find(prefix + "mix_factor") != tensor_types.end()) ? tensor_types[prefix + "mix_factor"] : GGML_TYPE_F32;
+        enum ggml_type wtype = GGML_TYPE_F32;  // (tensor_types.ypes.find(prefix + "mix_factor") != tensor_types.end()) ? tensor_types[prefix + "mix_factor"] : GGML_TYPE_F32;
         params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
     }

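Both hunks touch the same idiom: look the tensor's name up in `tensor_types` and fall back to `GGML_TYPE_F32` when the loader recorded no type for it. A minimal standalone sketch of that pattern, with a stand-in enum in place of ggml's `ggml_type` so it compiles without the ggml headers (all names here are illustrative):

```cpp
#include <cstdio>
#include <map>
#include <string>

// Stand-in for ggml's ggml_type, so the sketch is self-contained.
enum sketch_type { SKETCH_F32, SKETCH_F16 };

// Return the recorded type for `name`, or F32 if none was recorded.
// A single find() avoids the second lookup of find() + operator[],
// and it works on a const map (operator[] would insert into a mutable one).
static sketch_type get_type(const std::map<std::string, sketch_type>& tensor_types,
                            const std::string& name) {
    auto it = tensor_types.find(name);
    return it != tensor_types.end() ? it->second : SKETCH_F32;
}

int main() {
    std::map<std::string, sketch_type> tensor_types = {
        {"blk.0.proj.weight", SKETCH_F16},
    };
    std::printf("%d\n", get_type(tensor_types, "blk.0.proj.weight"));  // 1 (F16)
    std::printf("%d\n", get_type(tensor_types, "blk.0.proj.bias"));    // 0 (F32 fallback)
    return 0;
}
```

The inline version in the diff spells this as `find()` followed by `operator[]`, i.e. two lookups on a mutable map; the single-iterator form above expresses the same fallback with one lookup.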