Skip to content

Commit 647452b

Browse files
author
Anoop Kapoor
committed
@FIR-1033 - llama.cpp crash with below prompt for model SakanaAI-TinySwallow-1.5B-Instruct-F32.gguf
1 parent 461411f commit 647452b

File tree

1 file changed

+6
-3
lines changed

1 file changed

+6
-3
lines changed

src/llama-context.cpp

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1090,12 +1090,14 @@ int llama_context::decode(const llama_batch & batch_inp) {
10901090
ggml_status status;
10911091
const auto * res = process_ubatch(ubatch, LLM_GRAPH_TYPE_DECODER, mctx.get(), status);
10921092

1093-
#if defined(GGML_PERF) || defined(GGML_PERF_RELEASE)
1093+
if (res) {
1094+
#if defined(GGML_PERF) || defined(GGML_PERF_RELEASE)
10941095
ggml_perf_accumulate(perf_totals, res->get_gf());
1095-
#elif defined(GGML_PERF_DETAIL)
1096+
#elif defined(GGML_PERF_DETAIL)
10961097
ggml_perf_accumulate(perf_totals, res->get_gf());
10971098
ggml_perf_write_detailed_csv(res->get_gf(), perf_all_shape_fp);
1098-
#endif /* GML_PERF-related flags */
1099+
#endif /* GGML_PERF-related flags */
1100+
}
10991101

11001102

11011103
if (!res) {
@@ -1121,6 +1123,7 @@ int llama_context::decode(const llama_batch & batch_inp) {
11211123
memory->seq_rm(s, pos_min[s], -1);
11221124
}
11231125

1126+
printf("\n ANOOP Status value %d ", status);
11241127
switch (status) {
11251128
case GGML_STATUS_ABORTED: return 2;
11261129
case GGML_STATUS_ALLOC_FAILED: return -2;

0 commit comments

Comments
 (0)