Skip to content

Commit 544b266

Browse files
committed
llama: updated error output for llama_decode_internal and llama_encode_internal
1 parent a5e87bf commit 544b266

File tree

1 file changed

+4
-4
lines changed

1 file changed

+4
-4
lines changed

src/llama.cpp

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -16582,7 +16582,7 @@ static int llama_decode_internal(
16582 16582
const uint32_t n_tokens_all = batch_all.n_tokens;
16583 16583

16584 16584
if (n_tokens_all == 0) {
16585-
LLAMA_LOG_ERROR("%s: n_tokens == 0", __func__);
16585+
LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
16586 16586
return -1;
16587 16587
}
16588 16588

@@ -16595,7 +16595,7 @@ static int llama_decode_internal(
16595 16595
if (batch_all.token) {
16596 16596
for (uint32_t i = 0; i < n_tokens_all; ++i) {
16597 16597
if (batch_all.token[i] < 0 || (uint32_t)batch_all.token[i] >= model.vocab.n_vocab) {
16598-
LLAMA_LOG_ERROR("%s: invalid token[%d] = %d", __func__, i, batch_all.token[i]);
16598+
LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch_all.token[i]);
16599 16599
return -1;
16600 16600
}
16601 16601
}
@@ -16883,7 +16883,7 @@ static int llama_encode_internal(
16883 16883
const uint32_t n_tokens = batch.n_tokens;
16884 16884

16885 16885
if (n_tokens == 0) {
16886-
LLAMA_LOG_ERROR("%s: n_tokens == 0", __func__);
16886+
LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
16887 16887
return -1;
16888 16888
}
16889 16889

@@ -16896,7 +16896,7 @@ static int llama_encode_internal(
16896 16896
if (batch.token) {
16897 16897
for (uint32_t i = 0; i < n_tokens; ++i) {
16898 16898
if (batch.token[i] < 0 || (uint32_t)batch.token[i] >= model.vocab.n_vocab) {
16899-
LLAMA_LOG_ERROR("%s: invalid token[%d] = %d", __func__, i, batch.token[i]);
16899+
LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]);
16900 16900
return -1;
16901 16901
}
16902 16902
}

0 commit comments

Comments (0)