@@ -925,7 +925,7 @@ struct common_init_result common_init_from_params(common_params & params) {
     // load and optionally apply lora adapters
     for (auto & la : params.lora_adapters) {
         llama_lora_adapter_ptr lora;
-        lora.reset(llama_lora_adapter_init(model, la.path.c_str()));
+        lora.reset(llama_lora_adapter_init(model, la.path.c_str(), mparams.no_byteswap));
         if (lora == nullptr) {
             LOG_ERR("%s: failed to apply lora adapter '%s'\n", __func__, la.path.c_str());
             llama_free(lctx);
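With this hunk, `llama_lora_adapter_init` takes the byteswap flag as a third argument, so any direct caller has to forward it as well. A minimal sketch of standalone usage, assuming `model` is already loaded and `common_params params` is populated; the adapter file name is hypothetical:

```cpp
// Sketch only, not part of this patch: forward the byteswap setting
// from common_params through the model params to the lora loader.
llama_model_params mparams = common_model_params_to_llama(params);

llama_lora_adapter_ptr lora;  // smart pointer owning the adapter
lora.reset(llama_lora_adapter_init(model, "my-adapter.gguf", mparams.no_byteswap));
if (lora == nullptr) {
    LOG_ERR("%s: failed to load lora adapter\n", __func__);
}
```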
@@ -1030,6 +1030,7 @@ struct llama_model_params common_model_params_to_llama(common_params & params) {
     mparams.use_mmap        = params.use_mmap;
     mparams.use_mlock       = params.use_mlock;
     mparams.check_tensors   = params.check_tensors;
+    mparams.no_byteswap     = params.no_byteswap;
     if (params.kv_overrides.empty()) {
         mparams.kv_overrides = NULL;
     } else {
@@ -1357,8 +1358,9 @@ struct llama_model * common_load_model_from_url(
     int n_split = 0;
     {
         struct gguf_init_params gguf_params = {
-            /*.no_alloc = */ true,
-            /*.ctx      = */ NULL,
+            /*.no_alloc    = */ true,
+            /*.ctx         = */ NULL,
+            /*.no_byteswap = */ false,
         };
         auto * ctx_gguf = gguf_init_from_file(local_path.c_str(), gguf_params);
         if (!ctx_gguf) {
@@ -1856,8 +1858,9 @@ static common_control_vector_data common_control_vector_load_one(const common_co
 
     ggml_context * ctx = nullptr;
     struct gguf_init_params meta_gguf_params = {
-        /*.no_alloc = */ false,
-        /*.ctx      = */ &ctx,
+        /*.no_alloc    = */ false,
+        /*.ctx         = */ &ctx,
+        /*.no_byteswap = */ false,
     };
     struct gguf_context * ctx_gguf = gguf_init_from_file(load_info.fname.c_str(), meta_gguf_params);
     if (!ctx_gguf) {
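Because `gguf_init_params` is built as a plain aggregate initializer throughout the codebase, every such initializer must grow the new field, as both hunks above do. A minimal sketch of standalone usage; the file name is hypothetical, and the intent of `no_byteswap` (skipping byte-order conversion when reading GGUF data) is inferred from the field name rather than confirmed by these hunks:

```cpp
// Sketch of the extended initializer, field order as in this patch.
ggml_context * meta = nullptr;
struct gguf_init_params gparams = {
    /*.no_alloc    = */ false,
    /*.ctx         = */ &meta,
    /*.no_byteswap = */ false,  // default: byteswapping stays enabled
};
struct gguf_context * gctx = gguf_init_from_file("model.gguf", gparams);
if (gctx) {
    gguf_free(gctx);
    ggml_free(meta);
}
```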