Commit aee57d4

no longer necessary to disambiguate common functions with ::
1 parent e58d3b1 commit aee57d4
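
For context (not part of the commit itself): the common helpers previously shared names such as llama_tokenize with the llama C API, which is why call sites qualified them with the global-scope operator ::. Below is a minimal, self-contained sketch using hypothetical names (tokenize, common_tokenize, worker) rather than repository code; it illustrates the C++ name-hiding rule that can make a leading :: necessary, and why a uniquely prefixed helper like common_tokenize needs no qualifier.

// Minimal sketch (hypothetical names, not repository code): why a leading ::
// can be required in C++, and why a uniquely named helper does not need one.
#include <iostream>
#include <string>
#include <vector>

// helper living in the global namespace (stands in for the old shared name)
std::vector<int> tokenize(const std::string & text) {
    return std::vector<int>(text.size(), 0);   // dummy "tokens"
}

// uniquely named helper (stands in for common_tokenize); nothing collides with it
std::vector<int> common_tokenize(const std::string & text) {
    return tokenize(text);
}

struct worker {
    // a member with the same name hides the global helper inside the class
    void tokenize() {}

    size_t count_old(const std::string & text) {
        return ::tokenize(text).size();        // :: is required to reach the global
    }
    size_t count_new(const std::string & text) {
        return common_tokenize(text).size();   // unique name: no qualifier needed
    }
};

int main() {
    worker w;
    std::cout << w.count_old("hello") << " " << w.count_new("hello") << "\n";
    return 0;
}

With the common_ rename in place, the plain calls shown in the diffs below resolve unambiguously everywhere, so the :: prefix can simply be dropped.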

File tree

21 files changed: +48 / -48 lines changed


examples/batched/batched.cpp

Lines changed: 1 addition & 1 deletion
@@ -51,7 +51,7 @@ int main(int argc, char ** argv) {
     // tokenize the prompt
 
     std::vector<llama_token> tokens_list;
-    tokens_list = ::common_tokenize(model, params.prompt, true);
+    tokens_list = common_tokenize(model, params.prompt, true);
 
     const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size())*n_parallel;

examples/cvector-generator/cvector-generator.cpp

Lines changed: 3 additions & 3 deletions
@@ -272,16 +272,16 @@ struct tokenized_prompt {
 
     tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
         const bool add_bos = llama_add_bos_token(llama_get_model(ctx));
-        tokens_pos = ::common_tokenize(ctx, pos, add_bos, true);
-        tokens_neg = ::common_tokenize(ctx, neg, add_bos, true);
+        tokens_pos = common_tokenize(ctx, pos, add_bos, true);
+        tokens_neg = common_tokenize(ctx, neg, add_bos, true);
         max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
         padding_seq(ctx, tokens_pos, max_seq_len);
         padding_seq(ctx, tokens_neg, max_seq_len);
     }
 
     void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
         // TODO: customize padding token
-        std::vector<llama_token> pad_tokens = ::common_tokenize(ctx, " ", false);
+        std::vector<llama_token> pad_tokens = common_tokenize(ctx, " ", false);
         llama_token pad_tok = pad_tokens.back();
         while (tokens.size() < len) {
             tokens.push_back(pad_tok);

examples/embedding/embedding.cpp

Lines changed: 1 addition & 1 deletion
@@ -135,7 +135,7 @@ int main(int argc, char ** argv) {
     // tokenize the prompts and trim
     std::vector<std::vector<int32_t>> inputs;
     for (const auto & prompt : prompts) {
-        auto inp = ::common_tokenize(ctx, prompt, true, true);
+        auto inp = common_tokenize(ctx, prompt, true, true);
         if (inp.size() > n_batch) {
             LOG_ERR("%s: number of tokens in input line (%lld) exceeds batch size (%lld), increase batch size and re-run\n",
                     __func__, (long long int) inp.size(), (long long int) n_batch);

examples/eval-callback/eval-callback.cpp

Lines changed: 1 addition & 1 deletion
@@ -129,7 +129,7 @@ static bool ggml_debug(struct ggml_tensor * t, bool ask, void * user_data) {
 static bool run(llama_context * ctx, const gpt_params & params) {
     const bool add_bos = llama_add_bos_token(llama_get_model(ctx));
 
-    std::vector<llama_token> tokens = ::common_tokenize(ctx, params.prompt, add_bos);
+    std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, add_bos);
 
     if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0))) {
         LOG_ERR("%s : failed to eval\n", __func__);

examples/imatrix/imatrix.cpp

Lines changed: 1 addition & 1 deletion
@@ -436,7 +436,7 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
     auto tim1 = std::chrono::high_resolution_clock::now();
     LOG_INF("%s: tokenizing the input ..\n", __func__);
 
-    std::vector<llama_token> tokens = ::common_tokenize(ctx, params.prompt, true);
+    std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, true);
 
     auto tim2 = std::chrono::high_resolution_clock::now();
     LOG_INF("%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count());

examples/infill/infill.cpp

Lines changed: 5 additions & 5 deletions
@@ -202,8 +202,8 @@ int main(int argc, char ** argv) {
 
     std::vector<llama_token> embd_inp;
     std::vector<llama_token> embd_end;
-    std::vector<llama_token> inp_pfx = ::common_tokenize(ctx, params.input_prefix, false);
-    std::vector<llama_token> inp_sfx = ::common_tokenize(ctx, params.input_suffix, false);
+    std::vector<llama_token> inp_pfx = common_tokenize(ctx, params.input_prefix, false);
+    std::vector<llama_token> inp_sfx = common_tokenize(ctx, params.input_suffix, false);
 
     GGML_ASSERT(llama_token_prefix(model) >= 0);
     GGML_ASSERT(llama_token_suffix(model) >= 0);
@@ -505,8 +505,8 @@ int main(int argc, char ** argv) {
             }
 
             // tokenize new prefix and suffix
-            std::vector<llama_token> inp_pfx = ::common_tokenize(ctx, params.input_prefix, false);
-            std::vector<llama_token> inp_sfx = ::common_tokenize(ctx, params.input_suffix, false);
+            std::vector<llama_token> inp_pfx = common_tokenize(ctx, params.input_prefix, false);
+            std::vector<llama_token> inp_sfx = common_tokenize(ctx, params.input_suffix, false);
 
             inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
             inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
@@ -579,7 +579,7 @@ int main(int argc, char ** argv) {
 
                 const size_t original_size = embd_inp.size();
 
-                const auto line_inp = ::common_tokenize(ctx, buffer, false);
+                const auto line_inp = common_tokenize(ctx, buffer, false);
                 LOG_DBG("input tokens: %s\n", string_from(ctx, line_inp).c_str());
 
                 embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());

examples/llava/llava-cli.cpp

Lines changed: 4 additions & 4 deletions
@@ -37,7 +37,7 @@ static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
 
 static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
     std::string str2 = str;
-    std::vector<llama_token> embd_inp = ::common_tokenize(ctx_llama, str2, add_bos, true);
+    std::vector<llama_token> embd_inp = common_tokenize(ctx_llama, str2, add_bos, true);
     eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
     return true;
 }
@@ -159,14 +159,14 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
         user_prompt = prompt.substr(image_pos + std::string("<image>").length());
         LOG_INF("system_prompt: %s\n", system_prompt.c_str());
         if (params->verbose_prompt) {
-            auto tmp = ::common_tokenize(ctx_llava->ctx_llama, system_prompt, true, true);
+            auto tmp = common_tokenize(ctx_llava->ctx_llama, system_prompt, true, true);
             for (int i = 0; i < (int) tmp.size(); i++) {
                 LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
             }
         }
         LOG_INF("user_prompt: %s\n", user_prompt.c_str());
         if (params->verbose_prompt) {
-            auto tmp = ::common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
+            auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
             for (int i = 0; i < (int) tmp.size(); i++) {
                 LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
             }
@@ -176,7 +176,7 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
         system_prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:";
         user_prompt = prompt + "\nASSISTANT:";
         if (params->verbose_prompt) {
-            auto tmp = ::common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
+            auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
             for (int i = 0; i < (int) tmp.size(); i++) {
                 LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
             }

examples/llava/minicpmv-cli.cpp

Lines changed: 1 addition & 1 deletion
@@ -114,7 +114,7 @@ static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
 
 static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
     std::string str2 = str;
-    std::vector<llama_token> embd_inp = ::common_tokenize(ctx_llama, str2, add_bos, true);
+    std::vector<llama_token> embd_inp = common_tokenize(ctx_llama, str2, add_bos, true);
     return eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
 }

examples/lookahead/lookahead.cpp

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@ int main(int argc, char ** argv) {
     std::vector<llama_token> inp;
     std::vector<llama_token> all;
 
-    inp = ::common_tokenize(ctx, params.prompt, true, true);
+    inp = common_tokenize(ctx, params.prompt, true, true);
     all = inp;
 
     const int max_context_size = llama_n_ctx(ctx);

examples/lookup/lookup-create.cpp

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ int main(int argc, char ** argv){
 
     // tokenize the prompt
     std::vector<llama_token> inp;
-    inp = ::common_tokenize(ctx, params.prompt, true, true);
+    inp = common_tokenize(ctx, params.prompt, true, true);
     fprintf(stderr, "%s: tokenization done\n", __func__);
