Skip to content

Commit 83a90c9

Browse files
committed
better names for common params fns
1 parent 6ea0304 commit 83a90c9

File tree

25 files changed

+41
-41
lines changed

25 files changed

+41
-41
lines changed

common/common.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -819,9 +819,9 @@ std::string fs_get_cache_file(const std::string & filename) {
819819
//
820820
// Model utils
821821
//
822-
struct common_init_result common_init_from_common_params(common_params & params) {
822+
struct common_init_result common_init_from_params(common_params & params) {
823823
common_init_result iparams;
824-
auto mparams = common_model_params_from_common_params(params);
824+
auto mparams = common_model_params_to_llama(params);
825825

826826
llama_model * model = nullptr;
827827

@@ -863,7 +863,7 @@ struct common_init_result common_init_from_common_params(common_params & params)
863863
}
864864
}
865865

866-
auto cparams = common_context_params_from_common_params(params);
866+
auto cparams = common_context_params_to_llama(params);
867867

868868
llama_context * lctx = llama_new_context_with_model(model, cparams);
869869
if (lctx == NULL) {
@@ -970,7 +970,7 @@ void common_lora_adapters_apply(struct llama_context * ctx, std::vector<common_l
970970
}
971971
}
972972

973-
struct llama_model_params common_model_params_from_common_params(const common_params & params) {
973+
struct llama_model_params common_model_params_to_llama(const common_params & params) {
974974
auto mparams = llama_model_default_params();
975975

976976
if (params.n_gpu_layers != -1) {
@@ -1022,7 +1022,7 @@ static ggml_type kv_cache_type_from_str(const std::string & s) {
10221022
throw std::runtime_error("Invalid cache type: " + s);
10231023
}
10241024

1025-
struct llama_context_params common_context_params_from_common_params(const common_params & params) {
1025+
struct llama_context_params common_context_params_to_llama(const common_params & params) {
10261026
auto cparams = llama_context_default_params();
10271027

10281028
cparams.n_ctx = params.n_ctx;

common/common.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -410,10 +410,10 @@ struct common_init_result {
410410
std::vector<common_lora_adapter_container> lora_adapters;
411411
};
412412

413-
struct common_init_result common_init_from_common_params(common_params & params);
413+
struct common_init_result common_init_from_params(common_params & params);
414414

415-
struct llama_model_params common_model_params_from_common_params (const common_params & params);
416-
struct llama_context_params common_context_params_from_common_params(const common_params & params);
415+
struct llama_model_params common_model_params_to_llama (const common_params & params);
416+
struct llama_context_params common_context_params_to_llama(const common_params & params);
417417
struct ggml_threadpool_params ggml_threadpool_params_from_cpu_params(const cpu_params & params);
418418

419419
struct llama_model * common_load_model_from_url(const char * model_url, const char * path_model, const char * hf_token, const struct llama_model_params & params);

examples/batched-bench/batched-bench.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ int main(int argc, char ** argv) {
3636

3737
// initialize the model
3838

39-
llama_model_params model_params = common_model_params_from_common_params(params);
39+
llama_model_params model_params = common_model_params_to_llama(params);
4040

4141
llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
4242

@@ -45,7 +45,7 @@ int main(int argc, char ** argv) {
4545
return 1;
4646
}
4747

48-
llama_context_params ctx_params = common_context_params_from_common_params(params);
48+
llama_context_params ctx_params = common_context_params_to_llama(params);
4949

5050
// ensure enough sequences are available
5151
ctx_params.n_seq_max = n_pl.empty() ? 1 : *std::max_element(n_pl.begin(), n_pl.end());

examples/batched/batched.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ int main(int argc, char ** argv) {
3939

4040
// initialize the model
4141

42-
llama_model_params model_params = common_model_params_from_common_params(params);
42+
llama_model_params model_params = common_model_params_to_llama(params);
4343

4444
llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
4545

@@ -57,7 +57,7 @@ int main(int argc, char ** argv) {
5757

5858
// initialize the context
5959

60-
llama_context_params ctx_params = common_context_params_from_common_params(params);
60+
llama_context_params ctx_params = common_context_params_to_llama(params);
6161

6262
ctx_params.n_ctx = n_kv_req;
6363
ctx_params.n_batch = std::max(n_predict, n_parallel);

examples/cvector-generator/cvector-generator.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -413,7 +413,7 @@ int main(int argc, char ** argv) {
413413
llama_numa_init(params.numa);
414414

415415
// load the model to get hparams
416-
common_init_result llama_init = common_init_from_common_params(params);
416+
common_init_result llama_init = common_init_from_params(params);
417417

418418
llama_model * model = llama_init.model;
419419
llama_context * ctx = llama_init.context;

examples/embedding/embedding.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,7 @@ int main(int argc, char ** argv) {
9595
llama_numa_init(params.numa);
9696

9797
// load the model
98-
common_init_result llama_init = common_init_from_common_params(params);
98+
common_init_result llama_init = common_init_from_params(params);
9999

100100
llama_model * model = llama_init.model;
101101
llama_context * ctx = llama_init.context;

examples/eval-callback/eval-callback.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -160,7 +160,7 @@ int main(int argc, char ** argv) {
160160
params.warmup = false;
161161

162162
// init
163-
common_init_result llama_init = common_init_from_common_params(params);
163+
common_init_result llama_init = common_init_from_params(params);
164164

165165
llama_model * model = llama_init.model;
166166
llama_context * ctx = llama_init.context;

examples/gritlm/gritlm.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -160,8 +160,8 @@ int main(int argc, char * argv[]) {
160160

161161
common_init();
162162

163-
llama_model_params mparams = common_model_params_from_common_params(params);
164-
llama_context_params cparams = common_context_params_from_common_params(params);
163+
llama_model_params mparams = common_model_params_to_llama(params);
164+
llama_context_params cparams = common_context_params_to_llama(params);
165165

166166
llama_backend_init();
167167

examples/imatrix/imatrix.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -607,7 +607,7 @@ int main(int argc, char ** argv) {
607607
params.warmup = false;
608608

609609
// init
610-
common_init_result llama_init = common_init_from_common_params(params);
610+
common_init_result llama_init = common_init_from_params(params);
611611

612612
llama_model * model = llama_init.model;
613613
llama_context * ctx = llama_init.context;

examples/infill/infill.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -174,7 +174,7 @@ int main(int argc, char ** argv) {
174174

175175
// load the model and apply lora adapter, if any
176176
LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__);
177-
common_init_result llama_init = common_init_from_common_params(params);
177+
common_init_result llama_init = common_init_from_params(params);
178178

179179
model = llama_init.model;
180180
ctx = llama_init.context;

0 commit comments

Comments (0)