1 file changed, +4 -4 lines changed
@@ -118,11 +118,11 @@ static void usage(const char * executable) {
     printf("      Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n\n");
     printf("Note: --include-weights and --exclude-weights cannot be used together\n");
     printf("Note: The token embeddings tensor is loaded in system RAM, even in case of full GPU/VRAM offload.\n");
-    printf("Note: The recommanded type for the output tensor is q6_K for the ffn types > iq3_xxs and < q8_0.\n");
-    printf("Note: Usually, attn-q-type can be one type below the chosen ffn type, and attn-v-type should be one type above.\n");
+    printf("Note: The recommanded type for the output tensor is q6_K for the ffn types > iq3_xxs and < q8_0.\n");
+    printf("Note: Usually, attn-q-type can be one type below the chosen ffn type, and attn-v-type should be one type above.\n");
     printf("Note: --attn-qkv-type replaces the types attn-q, attn-k, and attn-v on some models.\n");
-    printf("Note: Write the specific tensor legacy quants as qN_N, the K-Quants as qN_K, the IQ-Quants as iqN_xx.\n");
-    // TODO: - eventually - harmonize the CAPS writing of the FTYPEs, and non CAPS writing of the GGML_TYPEs.
+    printf("Note: Write the specific tensor legacy quants as qN_N, the K-Quants as qN_K, the IQ-Quants as iqN_xx.\n");
+    // TODO: - eventually - harmonize the CAPS writing of the FTYPEs, and non CAPS writing of the GGML_TYPEs.
     printf("\nAllowed quantization types:\n");
     for (auto & it : QUANT_OPTIONS) {
         if (it.name != "COPY") {
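Not part of the patch: a minimal, self-contained C++ sketch of the spelling rule the last note describes, namely that legacy quants are written as qN_N, K-quants as qN_K, and IQ-quants as iqN_xx. The classify() helper below is hypothetical and only illustrates that naming convention; it is not an API of llama-quantize or llama.cpp.

    #include <cctype>
    #include <cstdio>
    #include <string>

    // Hypothetical helper: classifies a tensor quant-type string
    // by the spelling rule stated in the usage notes above.
    static const char * classify(const std::string & name) {
        // iqN_xx : IQ-quants, e.g. "iq3_xxs"
        if (name.rfind("iq", 0) == 0) {
            return "IQ-quant (iqN_xx)";
        }
        const auto us = name.find('_');
        if (name.rfind("q", 0) == 0 && us != std::string::npos) {
            const unsigned char next = (unsigned char) name[us + 1];
            // qN_K : K-quants, e.g. "q6_K"
            if (std::tolower(next) == 'k') {
                return "K-quant (qN_K)";
            }
            // qN_N : legacy quants, e.g. "q4_0", "q8_0"
            if (std::isdigit(next)) {
                return "legacy quant (qN_N)";
            }
        }
        return "unknown";
    }

    int main() {
        for (const char * n : { "q4_0", "q8_0", "q6_K", "iq3_xxs", "COPY" }) {
            std::printf("%-8s -> %s\n", n, classify(n));
        }
        return 0;
    }

Running the sketch prints, for example, "q6_K -> K-quant (qN_K)" and "iq3_xxs -> IQ-quant (iqN_xx)", which is the lower-case GGML_TYPE spelling the TODO above contrasts with the all-caps FTYPE names.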