We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 082e7a0 commit 661890c — Copy full SHA for 661890c
src/llama-context.cpp
@@ -2290,7 +2290,7 @@ void llama_context::opt_epoch(
2290
std::vector<llama_token> labels_sparse(n_ctx);
2291
std::vector<int32_t> masks_sparse(n_ctx);
2292
2293
- int64_t idata = (resume_from_batch >= 0) ? resume_from_batch + 1 : 0;
+ int64_t idata = (resume_from_batch > 0) ? resume_from_batch + 1 : 0;
2294
2295
int64_t t_loop_start = ggml_time_us();
2296
int64_t ndata_in_loop = idata_split*ubatch_per_ctx;
0 commit comments