
Commit fa1d772

Merge pull request #66 from tsisw/FIR-1033
@FIR-1033 - llama.cpp crash with below prompt for model SakanaAI-Tiny…
2 parents 461411f + 44f9ba9 commit fa1d772

File tree

1 file changed: +4 -0 lines changed

src/llama-context.cpp

Lines changed: 4 additions & 0 deletions
@@ -1091,10 +1091,14 @@ int llama_context::decode(const llama_batch & batch_inp) {
     const auto * res = process_ubatch(ubatch, LLM_GRAPH_TYPE_DECODER, mctx.get(), status);
 
 #if defined(GGML_PERF) || defined(GGML_PERF_RELEASE)
+    if (res) {
     ggml_perf_accumulate(perf_totals, res->get_gf());
+    }
 #elif defined(GGML_PERF_DETAIL)
+    if (res) {
     ggml_perf_accumulate(perf_totals, res->get_gf());
     ggml_perf_write_detailed_csv(res->get_gf(), perf_all_shape_fp);
+    }
 #endif /* GML_PERF-related flags */
 
 

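For context, the change guards the GGML_PERF accumulation paths with `if (res)`: the crash report suggests process_ubatch() can yield no result for the offending prompt, so dereferencing it via res->get_gf() would fault. Below is a minimal standalone C++ sketch of that guard pattern under stated assumptions; the names Graph, Result, process_stub, and perf_accumulate_stub are hypothetical stand-ins, not llama.cpp's API.

// Minimal standalone sketch (not llama.cpp code). It mimics the failure mode the
// commit guards against: a processing step that can yield no result, followed by
// perf accumulation that dereferences that result. All names here are hypothetical.
#include <cstdio>
#include <memory>

struct Graph {
    int n_nodes = 0;
};

struct Result {
    Graph gf;
    // Mirrors the res->get_gf() call seen in the diff.
    Graph * get_gf() { return &gf; }
};

// Stand-in for process_ubatch(): may fail and return a null result.
std::unique_ptr<Result> process_stub(bool fail) {
    if (fail) {
        return nullptr;
    }
    return std::make_unique<Result>();
}

// Stand-in for ggml_perf_accumulate(): dereferences the graph pointer,
// so calling it with a graph taken from a null result would crash.
void perf_accumulate_stub(long & perf_totals, const Graph * gf) {
    perf_totals += gf->n_nodes;
}

int main() {
    long perf_totals = 0;

    // Simulate the failing prompt: no result is produced.
    auto res = process_stub(/*fail=*/true);

    // The pattern added by the commit: only touch the graph when a result exists.
    if (res) {
        perf_accumulate_stub(perf_totals, res->get_gf());
    }

    std::printf("perf_totals = %ld\n", perf_totals);
    return 0;
}

Built with any C++14 compiler, the program prints perf_totals = 0 instead of crashing when the stub fails, which is the behavior the added guard provides for the perf counters.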