Commit 53608db

cont : reuse the var
ggml-ci
1 parent 7d7003d commit 53608db

1 file changed, 3 insertions(+), 3 deletions(-)

src/llama-batch.cpp

Lines changed: 3 additions & 3 deletions
@@ -250,15 +250,15 @@ bool llama_batch_allocr::init(
             bool ok = true;
 
             if (batch.token) {
-                if (seq_pos_min(s) != memory->seq_pos_max(s) + 1) {
+                if (seq_pos_min(s) != p0 + 1) {
                     ok = false;
                 }
             } else {
                 assert(batch.embd);
 
                 // for embeddings (typically used as vision input), we allow them to have repeating positions
                 // ref: https://github.com/ggml-org/llama.cpp/issues/13694#issuecomment-2983871762
-                if (seq_pos_min(s) != memory->seq_pos_max(s) && seq_pos_min(s) != memory->seq_pos_max(s) + 1) {
+                if (seq_pos_min(s) != p0 && seq_pos_min(s) != p0 + 1) {
                     ok = false;
                 }
             }
@@ -269,7 +269,7 @@ bool llama_batch_allocr::init(
                     " - the last position stored in the memory module of the context (i.e. the KV cache) for sequence %d is X = %d\n"
                     " - the tokens for sequence %d in the input batch have a starting position of Y = %d\n"
                     " it is required that the sequence positions remain consecutive: Y = X + 1\n",
-                    __func__, s, s, memory->seq_pos_max(s), s, seq_pos_min(s));
+                    __func__, s, s, p0, s, seq_pos_min(s));
 
                 return false;
             }
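The change replaces the repeated calls to memory->seq_pos_max(s) with the variable p0, which (per the commit message, and assuming the parent commit 7d7003d introduced it just above these hunks) already holds the last position stored in the memory module for sequence s. The sketch below is a minimal, self-contained illustration of the resulting check, not the actual llama.cpp code: the memory_stub and batch_view types, the check_seq_positions helper, and the concrete positions are hypothetical stand-ins.

// Sketch of the pattern the commit applies (hypothetical types/names, not the llama.cpp API).
// The last position already stored for a sequence is fetched once into p0 and reused by
// every subsequent check and by the error report.
#include <cassert>
#include <cstdint>
#include <cstdio>

using pos_t = int32_t;

struct memory_stub {                  // stand-in for the context's memory module (KV cache)
    pos_t seq_pos_max(int /*s*/) const { return 41; }  // last cached position for the sequence
};

struct batch_view {                   // stand-in for sequence s's slice of the input batch
    bool  has_token;                  // true: token input, false: embedding input
    pos_t seq_pos_min;                // first position of sequence s in the batch
};

// Returns true when the batch positions of sequence s line up with the memory:
// tokens must continue at p0 + 1, embeddings may also repeat p0.
static bool check_seq_positions(const memory_stub & memory, const batch_view & batch, int s) {
    const pos_t p0 = memory.seq_pos_max(s);   // fetch once, reuse below

    bool ok = true;

    if (batch.has_token) {
        if (batch.seq_pos_min != p0 + 1) {
            ok = false;
        }
    } else {
        // embeddings (e.g. vision input) are allowed to repeat the last position
        if (batch.seq_pos_min != p0 && batch.seq_pos_min != p0 + 1) {
            ok = false;
        }
    }

    if (!ok) {
        fprintf(stderr,
                "sequence %d: memory ends at X = %d, batch starts at Y = %d, expected Y = X + 1\n",
                s, (int) p0, (int) batch.seq_pos_min);
    }

    return ok;
}

int main() {
    memory_stub memory;

    assert( check_seq_positions(memory, {true,  42}, 0));   // tokens continue at p0 + 1
    assert(!check_seq_positions(memory, {true,  44}, 0));   // gap in positions -> rejected
    assert( check_seq_positions(memory, {false, 41}, 0));   // embedding may repeat p0

    printf("all checks passed\n");
    return 0;
}

Fetching p0 once also keeps the consecutiveness rule (Y = X + 1) consistent between the checks and the error message path, since the same cached value is both tested and reported.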
