@@ -102,7 +102,7 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
 //
 [[noreturn]]
 static void usage(const char * executable) {
-    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--attn-q-type] [--attn-k-type] [--attn-v-type] [--attn-qkv-type] [--attn-output-type] [--ffn-gate-type] [--ffn-down-type] [--ffn-up-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
+    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--attn-q-type] [--attn-k-type] [--attn-v-type] [--attn-qkv-type] [--attn-output-type] [--attn-norm-type] [--ffn-norm-type] [--ffn-gate-type] [--ffn-down-type] [--ffn-up-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
     printf("  --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
     printf("  --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
     printf("  --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
@@ -117,6 +117,8 @@ static void usage(const char * executable) {
     printf("  --attn-v-type ggml_type: use this ggml_type for the attn_v.weight tensor.\n");
     printf("  --attn-qkv-type ggml_type: use this ggml_type for the attn_qkv.weight tensor.\n");
     printf("  --attn-output-type ggml_type: use this ggml_type for the attn_output.weight tensor.\n");
+    printf("  --attn-norm-type ggml_type: use this ggml_type instead of F32 for the tiny attn_norm.weight tensor.\n");
+    printf("  --ffn-norm-type ggml_type: use this ggml_type instead of F32 for the tiny ffn_norm tensor.\n");
     printf("  --ffn-gate-type ggml_type: use this ggml_type for the ffn_gate tensor.\n");
     printf("  --ffn-down-type ggml_type: use this ggml_type for the ffn_down tensor.\n");
     printf("  --ffn-up-type ggml_type: use this ggml_type for the ffn_up tensor.\n\n");
@@ -314,6 +316,18 @@ int main(int argc, char ** argv) {
             } else {
                 usage(argv[0]);
             }
+        } else if (strcmp(argv[arg_idx], "--attn-norm-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.attn_norm_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--ffn-norm-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.ffn_norm_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
         } else if (strcmp(argv[arg_idx], "--ffn-gate-type") == 0) {
             if (arg_idx < argc-1) {
                 params.ffn_gate_type = parse_ggml_type(argv[++arg_idx]);
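
With this change, the two new flags take a ggml_type argument, exactly like the existing per-tensor overrides such as --output-tensor-type. A minimal invocation sketch follows; the binary name (llama-quantize) and the model file names are placeholders, and f16 is just one example of a type name accepted by parse_ggml_type:

    ./llama-quantize --attn-norm-type f16 --ffn-norm-type f16 model-f32.gguf model-q4_k_m.gguf Q4_K_M 8

The positional arguments (input model, output model, quantization type, thread count) are unchanged; the new flags only override the type used for the attn_norm and ffn_norm tensors, which are otherwise kept at F32.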