@@ -922,14 +922,14 @@ struct common_init_result common_init_from_params(common_params & params) {
         common_lora_adapter_container loaded_la;
         loaded_la.path  = la.path;
         loaded_la.scale = la.scale;
-        loaded_la.adapter = llama_lora_adapter_init(model, la.path.c_str());
+        loaded_la.adapter.reset(llama_lora_adapter_init(model, la.path.c_str()));
         if (loaded_la.adapter == nullptr) {
             LOG_ERR("%s: failed to apply lora adapter '%s'\n", __func__, la.path.c_str());
             llama_free(lctx);
             llama_free_model(model);
             return iparams;
         }
-        iparams.lora_adapters.push_back(loaded_la); // copy to list of loaded adapters
+        iparams.lora_adapters.emplace_back(std::move(loaded_la)); // copy to list of loaded adapters
     }
     if (!params.lora_init_without_apply) {
         common_lora_adapters_apply(lctx, iparams.lora_adapters);
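
The .reset() call above only compiles if common_lora_adapter_container::adapter has changed from a raw llama_lora_adapter * to a smart pointer. A minimal sketch of what that member could look like, assuming a std::unique_ptr with a custom deleter that calls the C API's llama_lora_adapter_free() (the names below are illustrative, not taken from this diff):

    // Sketch only: owning wrapper around the C-API adapter handle.
    // Assumes llama.h declares llama_lora_adapter and llama_lora_adapter_free().
    #include <memory>
    #include <string>
    #include "llama.h"

    struct llama_lora_adapter_deleter {
        void operator()(llama_lora_adapter * adapter) const {
            llama_lora_adapter_free(adapter); // frees the adapter when the container is destroyed
        }
    };

    struct common_lora_adapter_container {
        std::string path;
        float       scale = 0.0f;
        std::unique_ptr<llama_lora_adapter, llama_lora_adapter_deleter> adapter;
    };

Because std::unique_ptr is move-only, copying loaded_la into the vector no longer compiles, which is why the hunk also switches push_back(loaded_la) to emplace_back(std::move(loaded_la)).
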
@@ -993,8 +993,8 @@ struct common_init_result common_init_from_params(common_params & params) {
         llama_perf_context_reset(lctx);
     }

-    iparams.model   = model;
-    iparams.context = lctx;
+    iparams.model.reset(model);
+    iparams.context.reset(lctx);

     return iparams;
 }
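
Similarly, iparams.model.reset(model) and iparams.context.reset(lctx) imply that common_init_result now stores smart pointers as well, so the result frees the model and context automatically when it goes out of scope instead of relying on manual llama_free()/llama_free_model() calls. A hedged sketch, assuming unique_ptr aliases with deleters that call the matching C API free functions (alias and deleter names are assumptions):

    // Sketch only: owning aliases for the model and context handles.
    #include <memory>
    #include <vector>
    #include "llama.h"

    struct llama_model_deleter {
        void operator()(llama_model * model) const { llama_free_model(model); }
    };
    struct llama_context_deleter {
        void operator()(llama_context * ctx) const { llama_free(ctx); }
    };

    using llama_model_ptr   = std::unique_ptr<llama_model,   llama_model_deleter>;
    using llama_context_ptr = std::unique_ptr<llama_context, llama_context_deleter>;

    struct common_init_result {
        llama_model_ptr   model;
        llama_context_ptr context;
        std::vector<common_lora_adapter_container> lora_adapters;
    };

Note that the error path in the first hunk still calls llama_free(lctx) and llama_free_model(model) by hand; that is consistent with the sketch, since at that point the raw pointers have not yet been handed over to the smart pointers.
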
@@ -1003,7 +1003,7 @@ void common_lora_adapters_apply(struct llama_context * ctx, std::vector<common_l
     llama_lora_adapter_clear(ctx);
     for (auto & la : lora_adapters) {
         if (la.scale != 0.0f) {
-            llama_lora_adapter_set(ctx, la.adapter, la.scale);
+            llama_lora_adapter_set(ctx, la.adapter.get(), la.scale);
         }
     }
 }
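
The .get() in the last hunk is the other half of the pattern: llama_lora_adapter_set() is a C function that only borrows the adapter for the duration of the call and does not take ownership, so the code passes the raw pointer out of the unique_ptr without releasing it; the container remains responsible for freeing the adapter.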