Skip to content

Commit d146a14

Browse files
committed
context : minor naming fix
1 parent 8da7f61 commit d146a14

File tree

3 files changed

+13
-13
lines changed

3 files changed

+13
-13
lines changed

src/llama-context.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -189,7 +189,7 @@ bool llama_context::apply_adapter_cvec(
189189
return cvec.apply(model, data, len, n_embd, il_start, il_end);
190190
}
191191

192-
llama_perf_context_data llama_context::get_perf() const {
192+
llama_perf_context_data llama_context::perf_get_data() const {
193193
llama_perf_context_data data = {};
194194

195195
data.t_start_ms = 1e-3 * t_start_us;

src/llama-context.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,6 @@ struct llama_context {
9494
//
9595
virtual int decode(llama_batch & inp_batch) = 0;
9696

97-
9897
// encode a batch of tokens by evaluating the encoder part of the transformer
9998
//
10099
// - lctx: llama context
@@ -296,7 +295,7 @@ struct llama_context {
296295

297296
// perf
298297

299-
virtual llama_perf_context_data get_perf() const;
298+
virtual llama_perf_context_data perf_get_data() const;
300299
virtual void perf_reset();
301300

302301
// members
@@ -326,20 +325,21 @@ struct llama_context {
326325

327326
bool has_evaluated_once = false;
328327

329-
mutable int64_t t_start_us;
330-
mutable int64_t t_load_us;
328+
mutable int64_t t_start_us = 0;
329+
mutable int64_t t_load_us = 0;
331330
mutable int64_t t_p_eval_us = 0;
332331
mutable int64_t t_eval_us = 0;
333332

334333
mutable int64_t t_compute_start_us = 0;
335-
mutable int64_t n_queued_tokens = 0;
334+
mutable int64_t n_queued_tokens = 0;
336335

337336
mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
338337
mutable int32_t n_eval = 0; // number of eval calls
339338
};
340339

341340
// TODO: make implementation details private
342-
struct llama_context_unified : public llama_context {
341+
class llama_context_unified : public llama_context {
342+
public:
343343
struct batch_manager;
344344

345345
// TODO: tmp until llama-model starts implementing the graph build function

src/llama.cpp

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -54,11 +54,11 @@ enum llm_norm_type {
5454
};
5555

5656
struct llm_build_context {
57-
llama_context & lctx;
58-
const llama_model & model;
59-
const llama_hparams & hparams;
60-
const llama_cparams & cparams;
61-
const llama_ubatch & ubatch;
57+
llama_context & lctx;
58+
const llama_model & model;
59+
const llama_hparams & hparams;
60+
const llama_cparams & cparams;
61+
const llama_ubatch & ubatch;
6262

6363
const int64_t n_embd;
6464
const int64_t n_layer;
@@ -7854,7 +7854,7 @@ struct llama_perf_context_data llama_perf_context(const struct llama_context * c
78547854
return data;
78557855
}
78567856

7857-
data = ctx->get_perf();
7857+
data = ctx->perf_get_data();
78587858

78597859
return data;
78607860
}

0 commit comments

Comments (0)