@@ -987,7 +987,7 @@ struct common_init_result common_init_from_params(common_params & params) {
     // load and optionally apply lora adapters
     for (auto & la : params.lora_adapters) {
         llama_adapter_lora_ptr lora;
-        lora.reset(llama_adapter_lora_init(model, la.path.c_str()));
+        lora.reset(llama_adapter_lora_init(model, la.path.c_str(), mparams.no_byteswap));
         if (lora == nullptr) {
             LOG_ERR("%s: failed to apply lora adapter '%s'\n", __func__, la.path.c_str());
             llama_free(lctx);
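
Note: this hunk presumes llama_adapter_lora_init() now accepts the byte-swap flag as a third argument (that API change lives outside this file). A minimal caller sketch under that assumption, with the hypothetical helper try_load_adapter standing in for the surrounding setup:

    #include "llama.h"
    #include "llama-cpp.h" // for the llama_adapter_lora_ptr smart pointer

    // Sketch only: `model` is a loaded llama_model* and `mparams` the
    // llama_model_params it was loaded with (both assumed from context).
    static bool try_load_adapter(llama_model * model, const llama_model_params & mparams, const char * path) {
        llama_adapter_lora_ptr lora;
        lora.reset(llama_adapter_lora_init(model, path, mparams.no_byteswap));
        return lora != nullptr; // on success, the adapter is freed by the smart pointer
    }
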
@@ -1092,6 +1092,7 @@ struct llama_model_params common_model_params_to_llama(common_params & params) {
     mparams.use_mmap        = params.use_mmap;
     mparams.use_mlock       = params.use_mlock;
     mparams.check_tensors   = params.check_tensors;
+    mparams.no_byteswap     = params.no_byteswap;
     if (params.kv_overrides.empty()) {
         mparams.kv_overrides = NULL;
     } else {
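
This is the plumbing step: the flag travels from common_params into llama_model_params alongside use_mmap/use_mlock/check_tensors. A hypothetical usage sketch (only the field names come from this diff; the CLI switch is an assumption):

    #include "common.h"

    int main() {
        common_params params;
        params.no_byteswap = true; // e.g. set from a --no-byteswap CLI flag (assumed)
        llama_model_params mparams = common_model_params_to_llama(params);
        return mparams.no_byteswap ? 0 : 1; // the helper copied the flag across
    }
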
@@ -1418,8 +1419,9 @@ struct llama_model * common_load_model_from_url(
     int n_split = 0;
     {
         struct gguf_init_params gguf_params = {
-            /*.no_alloc = */ true,
-            /*.ctx      = */ NULL,
+            /*.no_alloc    = */ true,
+            /*.ctx         = */ NULL,
+            /*.no_byteswap = */ false,
         };
         auto * ctx_gguf = gguf_init_from_file(local_path.c_str(), gguf_params);
         if (!ctx_gguf) {
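
Here the new gguf_init_params field is pinned to false: this metadata-only pass (no_alloc = true) just inspects the downloaded file's header to count splits, so existing behavior is preserved. A sketch of a caller that does opt out of byte-swapping, assuming the field's semantics match its name and the current gguf.h layout:

    #include "gguf.h"

    int main() {
        struct gguf_init_params p = {
            /*.no_alloc    = */ true,  // read metadata only, no tensor data
            /*.ctx         = */ NULL,
            /*.no_byteswap = */ true,  // skip byte-swap handling (assumed semantics)
        };
        struct gguf_context * gctx = gguf_init_from_file("model.gguf", p);
        if (!gctx) {
            return 1; // unreadable or not a GGUF file
        }
        gguf_free(gctx);
        return 0;
    }
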
@@ -2063,8 +2065,9 @@ static common_control_vector_data common_control_vector_load_one(const common_co

     ggml_context * ctx = nullptr;
     struct gguf_init_params meta_gguf_params = {
-        /*.no_alloc = */ false,
-        /*.ctx      = */ &ctx,
+        /*.no_alloc    = */ false,
+        /*.ctx         = */ &ctx,
+        /*.no_byteswap = */ false,
     };
     struct gguf_context * ctx_gguf = gguf_init_from_file(load_info.fname.c_str(), meta_gguf_params);
     if (!ctx_gguf) {
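
The control-vector loader likewise keeps no_byteswap = false. Worth noting: gguf_init_params is a plain aggregate, so a call site still using the old two-field initializer would compile, with the new trailing member value-initialized to false; a minimal sketch:

    #include "gguf.h"

    int main() {
        ggml_context * ctx = nullptr;
        // old-style initializer: the omitted no_byteswap member is
        // value-initialized to false by C++ aggregate initialization
        struct gguf_init_params p = {
            /*.no_alloc = */ false,
            /*.ctx      = */ &ctx,
        };
        return p.no_byteswap ? 1 : 0; // returns 0: the flag defaulted to false
    }

Spelling the field out at every call site, as this diff does, just makes that default explicit.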