1 file changed, +4 −4 lines changed

@@ -356,10 +356,10 @@ extern "C" {
 
     // model quantization parameters
     typedef struct llama_model_quantize_params {
-        int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
-        llama_ftype ftype; // quantize to this llama_ftype
-        ggml_type output_tensor_type; // output tensor type
-        ggml_type token_embedding_type; // token embeddings tensor type
+        int32_t nthread;                     // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
+        enum llama_ftype ftype;              // quantize to this llama_ftype
+        enum ggml_type output_tensor_type;   // output tensor type
+        enum ggml_type token_embedding_type; // token embeddings tensor type
         bool allow_requantize;               // allow quantizing non-f32/f16 tensors
         bool quantize_output_tensor;         // quantize output.weight
         bool only_copy;                      // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
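
The functional change is the added `enum` keywords. llama.h is consumed from plain C as well as C++, and in C an enum type must be spelled `enum llama_ftype` / `enum ggml_type` (unless typedef'd) for the header to compile; C++ accepts either form. As a minimal sketch (not part of the PR; the function names llama_model_quantize_default_params and llama_model_quantize are from the existing C API, the chosen ftype/tensor types and file names are illustrative assumptions), a plain-C caller can now fill the struct directly:

    /* Minimal C sketch: fill llama_model_quantize_params from plain C.
     * Quantization targets and file names are placeholder choices. */
    #include "llama.h"

    int quantize_q4_k_m(const char * fname_inp, const char * fname_out) {
        llama_model_quantize_params params = llama_model_quantize_default_params();
        params.nthread              = 0;                         /* <= 0: use all hardware threads */
        params.ftype                = LLAMA_FTYPE_MOSTLY_Q4_K_M; /* quantize to this llama_ftype */
        params.output_tensor_type   = GGML_TYPE_F16;             /* keep output tensor at f16 */
        params.token_embedding_type = GGML_TYPE_F16;             /* keep token embeddings at f16 */

        /* returns 0 on success */
        return (int) llama_model_quantize(fname_inp, fname_out, &params);
    }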