1 parent c013397 commit 1b9ca1c
src/llama-context.cpp
@@ -805,7 +805,7 @@ int llama_context::encode(const llama_batch & batch_inp) {
     }

     // extract embeddings
-    if (cparams.embeddings && t_embd) {
+    if (embd && t_embd) {
         ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(sched.get(), t_embd);
         GGML_ASSERT(backend_embd != nullptr);
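For context, a minimal usage sketch (not part of this commit) of how embedding output is typically requested through the public llama.cpp C API so that the extraction branch touched above actually runs. The `embeddings` flag on `llama_context_params` is the real API field referenced by the old condition; the model path is a placeholder, and the exact loader/context entry points vary between llama.cpp versions.

// Minimal sketch, assuming a recent llama.cpp C API; not part of this commit.
#include "llama.h"

int main() {
    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_model_load_from_file("model.gguf", mparams); // placeholder path

    llama_context_params cparams = llama_context_default_params();
    cparams.embeddings = true; // request embedding output from the context

    llama_context * ctx = llama_init_from_model(model, cparams);

    // ... build a llama_batch, call llama_encode(ctx, batch), then read the
    // vectors back via llama_get_embeddings(ctx) or llama_get_embeddings_seq(ctx, 0).

    llama_free(ctx);
    llama_model_free(model);
    return 0;
}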