Skip to content

Commit 8cbd5a4

Browse files
authored
Merge pull request #60 from tsisw/llama-cpp-performance-ops
@FIR-1001 - GGML: Tsavorite Performance OPs data
2 parents c806b46 + b6fd008 commit 8cbd5a4

File tree

7 files changed

+42
-30
lines changed

7 files changed

+42
-30
lines changed

ggml/include/ggml.h

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -343,13 +343,13 @@ extern "C" {
343343
GGML_NORETURN GGML_ATTRIBUTE_FORMAT(3, 4)
344344
GGML_API void ggml_abort(const char * file, int line, const char * fmt, ...);
345345

346-
#ifdef GGML_PERF
346+
#if defined(GGML_PERF) || defined(GGML_PERF_DETAIL)
347347
enum ggml_compute_backend_type {
348348
GGML_COMPUTE_BACKEND_CPU=0,
349349
GGML_COMPUTE_BACKEND_TSAVORITE,
350350
GGML_COMPUTE_BACKEND_COUNT
351351
};
352-
#endif /* GGML_PERF */
352+
#endif /* GGML_PERF || GGML_PERF_DETAIL */
353353

354354
enum ggml_status {
355355
GGML_STATUS_ALLOC_FAILED = -2,
@@ -659,14 +659,15 @@ extern "C" {
659659
char name[GGML_MAX_NAME];
660660

661661
void * extra; // extra things e.g. for ggml-cuda.cu
662-
#ifdef GGML_PERF
662+
663+
#if defined(GGML_PERF) || defined(GGML_PERF_DETAIL)
663664
int64_t perf_runs;
664665
int64_t perf_time_us;
665666
enum ggml_compute_backend_type ggml_compute_backend;
666667
char padding[4];
667668
#else
668669
char padding[8];
669-
#endif /* GGML_PERF */
670+
#endif /* GGML_PERF || GGML_PERF_DETAIL */
670671
};
671672

672673
static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);
@@ -2556,7 +2557,7 @@ extern "C" {
25562557
GGML_API void ggml_threadpool_params_init (struct ggml_threadpool_params * p, int n_threads);
25572558
GGML_API bool ggml_threadpool_params_match (const struct ggml_threadpool_params * p0, const struct ggml_threadpool_params * p1);
25582559

2559-
#ifdef GGML_PERF
2560+
#if defined(GGML_PERF) || defined(GGML_PERF_DETAIL)
25602561
struct ggml_perf_backend_subtotals {
25612562
int64_t total_us;
25622563
int64_t runs;
@@ -2586,7 +2587,7 @@ void ggml_perf_write_detailed_csv(struct ggml_cgraph * cgraph, FILE *fp);
25862587
void ggml_perf_accumulate(struct ggml_perf_totals totals[GGML_OP_COUNT], struct ggml_cgraph * cgraph);
25872588
const char * ggml_backend_type(enum ggml_compute_backend_type backend);
25882589

2589-
#endif /* GGML_PERF */
2590+
#endif /* GGML_PERF || GGML_PERF_DETAIL */
25902591

25912592
#ifdef __cplusplus
25922593
}

ggml/src/ggml-cpu/ggml-cpu.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2879,12 +2879,12 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
28792879
for (int node_n = 0; node_n < cgraph->n_nodes && atomic_load_explicit(&tp->abort, memory_order_relaxed) != node_n; node_n++) {
28802880
struct ggml_tensor * node = cgraph->nodes[node_n];
28812881

2882-
#ifdef GGML_PERF
2882+
#if defined(GGML_PERF) || defined(GGML_PERF_DETAIL)
28832883
int64_t t_start = ggml_time_us();
2884-
#endif
2884+
#endif /* GGML_PERF || GGML_PERF_DETAIL */
28852885
ggml_compute_forward(&params, node);
28862886

2887-
#ifdef GGML_PERF
2887+
#if defined(GGML_PERF) || defined(GGML_PERF_DETAIL)
28882888
int64_t t_end = ggml_time_us();
28892889
node->perf_runs++;
28902890
if (t_end >= t_start) {
@@ -2893,7 +2893,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
28932893
// Handle wraparound by assuming timer rolls over at max int64_t value
28942894
node->perf_time_us += (INT64_MAX - t_start + t_end + 1);
28952895
}
2896-
#endif
2896+
#endif /* GGML_PERF || GGML_PERF_DETAIL */
28972897
if (state->ith == 0 && cplan->abort_callback &&
28982898
cplan->abort_callback(cplan->abort_callback_data)) {
28992899
atomic_store_explicit(&tp->abort, node_n + 1, memory_order_relaxed);

ggml/src/ggml-tsavorite/ggml-tsavorite.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -929,9 +929,9 @@ static enum ggml_status ggml_tsavorite_graph_compute(ggml_backend_t backend,
929929

930930
for (int i = 0; i < cgraph->n_nodes; i++) {
931931
int32_t kernel_sub_type=-1;
932-
#ifdef GGML_PERF
932+
#if defined(GGML_PERF) || defined(GGML_PERF_DETAIL)
933933
int64_t t_start = ggml_time_us();
934-
#endif
934+
#endif /* GGML_PERF || GGML_PERF_DETAIL */
935935
node = cgraph->nodes[i];
936936
src0 = node->src[0];
937937
src1 = node->src[1];
@@ -1279,7 +1279,7 @@ static enum ggml_status ggml_tsavorite_graph_compute(ggml_backend_t backend,
12791279
device->stats.op_run_count[kernel_type].max_num_of_elem < max_num_of_elem)
12801280
device->stats.op_run_count[kernel_type].max_num_of_elem = max_num_of_elem;
12811281
}
1282-
#ifdef GGML_PERF
1282+
#if defined(GGML_PERF) || defined(GGML_PERF_DETAIL)
12831283
int64_t t_end = ggml_time_us();
12841284
node->perf_runs++;
12851285
node->ggml_compute_backend = GGML_COMPUTE_BACKEND_TSAVORITE;
@@ -1289,7 +1289,7 @@ static enum ggml_status ggml_tsavorite_graph_compute(ggml_backend_t backend,
12891289
// Handle wraparound by assuming timer rolls over at max int64_t value
12901290
node->perf_time_us += (INT64_MAX - t_start + t_end + 1);
12911291
}
1292-
#endif
1292+
#endif /* GGML_PERF || GGML_PERF_DETAIL */
12931293
}
12941294

12951295
// This needs to be implemented correctly when we have a mixture of CPU and accelerator operations

ggml/src/ggml.c

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1020,12 +1020,12 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
10201020
"GLU",
10211021
};
10221022

1023-
#ifdef GGML_PERF
1023+
#if defined(GGML_PERF) || defined(GGML_PERF_DETAIL)
10241024
static const char * GGML_BACKEND_TYPE[GGML_COMPUTE_BACKEND_COUNT] = {
10251025
"CPU",
10261026
"OPU"
10271027
};
1028-
#endif /* GGML_PERF */
1028+
#endif /* GGML_PERF || GGML_PERF_DETAIL */
10291029

10301030
static_assert(GGML_OP_COUNT == 90, "GGML_OP_COUNT != 90");
10311031

@@ -1262,11 +1262,11 @@ const char * ggml_op_name(enum ggml_op op) {
12621262
return GGML_OP_NAME[op];
12631263
}
12641264

1265-
#ifdef GGML_PERF
1265+
#if defined(GGML_PERF) || defined(GGML_PERF_DETAIL)
12661266
const char * ggml_backend_type(enum ggml_compute_backend_type backend) {
12671267
return GGML_BACKEND_TYPE[backend];
12681268
}
1269-
#endif /* GGML_PERF */
1269+
#endif /* GGML_PERF || GGML_PERF_DETAIL */
12701270

12711271
const char * ggml_op_symbol(enum ggml_op op) {
12721272
return GGML_OP_SYMBOL[op];
@@ -1692,11 +1692,11 @@ static struct ggml_tensor * ggml_new_tensor_impl(
16921692
/*.data =*/ obj_alloc_size > 0 ? (void *)(result + 1) : data,
16931693
/*.name =*/ { 0 },
16941694
/*.extra =*/ NULL,
1695-
#ifdef GGML_PERF
1695+
#if defined(GGML_PERF) || defined(GGML_PERF_DETAIL)
16961696
/*.perf_runs =*/ 0,
16971697
/*.perf_time_us =*/ 0,
16981698
/*.ggml_compute_backend =*/ GGML_COMPUTE_BACKEND_CPU,
1699-
#endif /* GGML_PERF */
1699+
#endif /* GGML_PERF || GGML_PERF_DETAIL */
17001700
/*.padding =*/ { 0 },
17011701
};
17021702

@@ -7231,7 +7231,7 @@ bool ggml_threadpool_params_match(const struct ggml_threadpool_params * p0, cons
72317231
return memcmp(p0->cpumask, p1->cpumask, GGML_MAX_N_THREADS) == 0;
72327232
}
72337233

7234-
#ifdef GGML_PERF
7234+
#if defined(GGML_PERF) || defined(GGML_PERF_DETAIL)
72357235
void ggml_perf_accumulate(struct ggml_perf_totals totals[GGML_OP_COUNT], struct ggml_cgraph * cgraph) {
72367236
for (int i = 0; i < cgraph->n_nodes; ++i) {
72377237
struct ggml_tensor * node = cgraph->nodes[i];
@@ -7258,7 +7258,9 @@ void ggml_perf_accumulate(struct ggml_perf_totals totals[GGML_OP_COUNT], struct
72587258
}
72597259
}
72607260
}
7261+
#endif /* GGML_PERF || GGML_PERF_DETAIL */
72617262

7263+
#if defined(GGML_PERF_DETAIL)
72627264
FILE * ggml_perf_log_open(const char *filename) {
72637265
// Try to delete existing file, ignore error if it doesn't exist
72647266
remove(filename);
@@ -7326,4 +7328,4 @@ void ggml_perf_write_detailed_csv(struct ggml_cgraph * cgraph, FILE *fp) {
73267328

73277329
fprintf(fp, "--------------------------------------------------------------------------------------------------------\n\n");
73287330
}
7329-
#endif /* GGML_PERF */
7331+
#endif /* GGML_PERF_DETAIL */

src/llama-context.cpp

Lines changed: 13 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1090,6 +1090,14 @@ int llama_context::decode(const llama_batch & batch_inp) {
10901090
ggml_status status;
10911091
const auto * res = process_ubatch(ubatch, LLM_GRAPH_TYPE_DECODER, mctx.get(), status);
10921092

1093+
#if defined(GGML_PERF)
1094+
ggml_perf_accumulate(perf_totals, res->get_gf());
1095+
#elif defined(GGML_PERF_DETAIL)
1096+
ggml_perf_accumulate(perf_totals, res->get_gf());
1097+
ggml_perf_write_detailed_csv(res->get_gf(), perf_all_shape_fp);
1098+
#endif /* GGML_PERF || GGML_PERF_DETAIL */
1099+
1100+
10931101
if (!res) {
10941102
// the last ubatch failed or was aborted -> remove all positions of that ubatch from the memory module
10951103
llama_pos pos_min[LLAMA_MAX_SEQ];
@@ -2751,7 +2759,7 @@ llama_perf_context_data llama_perf_context(const llama_context * ctx) {
27512759
}
27522760

27532761

2754-
#ifdef GGML_PERF
2762+
#if defined(GGML_PERF)
27552763
void ggml_perf_print_totals(struct ggml_perf_totals totals[GGML_OP_COUNT]) {
27562764
LLAMA_LOG_TSAVORITE("\n=== GGML Perf Summary ===\n");
27572765
LLAMA_LOG_TSAVORITE(" %-16s %7s %14s %16s\n", "Op", "Runs", "Total us", "Avg us");
@@ -2779,7 +2787,8 @@ void ggml_perf_print_totals(struct ggml_perf_totals totals[GGML_OP_COUNT]) {
27792787
}
27802788
}
27812789
}
2782-
#elif GGML_PERF_DETAIL
2790+
2791+
#elif defined(GGML_PERF_DETAIL)
27832792
void ggml_perf_print_totals(struct ggml_perf_totals totals[GGML_OP_COUNT]) {
27842793
LLAMA_LOG_TSAVORITE("\n=== GGML Perf Summary ===\n");
27852794
LLAMA_LOG_TSAVORITE(" %-16s %-8s %7s %14s %16s\n", "Op", "Target", "Runs", "Total us", "Avg us");
@@ -2843,7 +2852,7 @@ void llama_perf_context_print(const llama_context * ctx) {
28432852
__func__, data.t_eval_ms, data.n_eval, data.t_eval_ms / data.n_eval, 1e3 / data.t_eval_ms * data.n_eval);
28442853
LLAMA_LOG_INFO("%s: total time = %10.2f ms / %5d tokens\n", __func__, (t_end_ms - data.t_start_ms), (data.n_p_eval + data.n_eval));
28452854

2846-
#ifdef GGML_PERF
2855+
#if defined(GGML_PERF) || defined(GGML_PERF_DETAIL)
28472856
LLAMA_LOG_TSAVORITE("\n%s: load time = %10.2f ms\n", __func__, data.t_load_ms);
28482857
LLAMA_LOG_TSAVORITE("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
28492858
__func__, data.t_p_eval_ms, data.n_p_eval, data.t_p_eval_ms / data.n_p_eval, 1e3 / data.t_p_eval_ms * data.n_p_eval);
@@ -2852,7 +2861,7 @@ void llama_perf_context_print(const llama_context * ctx) {
28522861
LLAMA_LOG_TSAVORITE("%s: total time = %10.2f ms / %5d tokens\n", __func__, (t_end_ms - data.t_start_ms), (data.n_p_eval + data.n_eval));
28532862

28542863
ggml_perf_print_totals(const_cast<ggml_perf_totals *>(ctx->perf_totals));
2855-
#endif /* GGML_PERF */
2864+
#endif /* GGML_PERF || GGML_PERF_DETAIL */
28562865
}
28572866

28582867
void llama_perf_context_reset(llama_context * ctx) {

src/llama-context.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -198,9 +198,9 @@ struct llama_context {
198198

199199
// reserve a graph with a dummy ubatch of the specified size
200200
ggml_cgraph * graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_context_i * mctx, bool split_only = false);
201-
#ifdef GGML_PERF
201+
#if defined(GGML_PERF) || defined(GGML_PERF_DETAIL)
202202
struct ggml_perf_totals perf_totals[GGML_OP_COUNT] = {}; // add this to llama_context
203-
#endif
203+
#endif /* GGML_PERF || GGML_PERF_DETAIL */
204204

205205
private:
206206
llm_graph_params graph_params(

tools/main/main.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -126,9 +126,9 @@ int main(int argc, char ** argv) {
126126
LOG_WRN("%s: warning: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale);
127127
}
128128

129-
#ifdef GGML_PERF
129+
#if defined(GGML_PERF) || defined(GGML_PERF_DETAIL)
130130
llama_log_set(my_logger, nullptr);
131-
#endif /* GGML_PERF */
131+
#endif /* GGML_PERF || GGML_PERF_DETAIL */
132132
LOG_INF("%s: llama backend init\n", __func__);
133133

134134
llama_backend_init();

0 commit comments

Comments (0)