include/llama.h: 1 addition & 0 deletions
@@ -482,6 +482,7 @@ extern "C" {

LLAMA_API int32_t llama_model_n_ctx_train(const struct llama_model * model);
LLAMA_API int32_t llama_model_n_embd (const struct llama_model * model);
LLAMA_API int32_t llama_model_n_embd_full(const struct llama_model * model);
LLAMA_API int32_t llama_model_n_layer (const struct llama_model * model);
LLAMA_API int32_t llama_model_n_head (const struct llama_model * model);
LLAMA_API int32_t llama_model_n_head_kv (const struct llama_model * model);
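The new accessor exposes the full (main + auxiliary) embedding width alongside the existing per-layer width. A minimal usage sketch, assuming a context created with embeddings enabled (the helper name is illustrative, not part of this PR):

```cpp
#include <vector>
#include "llama.h"

// Copy the embedding row for output i. For deepstack models such as Qwen3-VL,
// each row holds n_embd_full floats (main + deepstack), so the buffer must be
// sized with llama_model_n_embd_full, not llama_model_n_embd.
static std::vector<float> copy_embd_row(llama_context * ctx, const llama_model * model, int32_t i) {
    const int32_t n_embd_full = llama_model_n_embd_full(model);
    const float * row = llama_get_embeddings_ith(ctx, i); // may be null for an invalid id
    return row ? std::vector<float>(row, row + n_embd_full) : std::vector<float>{};
}
```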
src/llama-context.cpp: 7 additions & 7 deletions
@@ -620,7 +620,7 @@ float * llama_context::get_embeddings_ith(int32_t i) {
throw std::runtime_error(format("corrupt output buffer (j=%" PRId64 ", n_outputs=%d)", j, n_outputs));
}

return embd + j*model.hparams.n_embd;
return embd + j*model.hparams.n_embd_full;
} catch (const std::exception & err) {
LLAMA_LOG_ERROR("%s: invalid embeddings id %d, reason: %s\n", __func__, i, err.what());
#ifndef NDEBUG
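The change above keeps row addressing consistent with the full per-output width: the output buffer holds one row of n_embd_full floats per output token. A simplified restatement of that indexing (not the exact code):

```cpp
// embd points at n_outputs consecutive rows, each n_embd_full floats wide;
// output j therefore starts j * n_embd_full floats into the buffer.
static float * embd_row(float * embd, int64_t j, int64_t n_embd_full) {
    return embd + j * n_embd_full;
}
```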
@@ -808,7 +808,7 @@ int llama_context::encode(const llama_batch & batch_inp) {

const auto & hparams = model.hparams;

const int64_t n_embd = hparams.n_embd;
const int64_t n_embd = hparams.n_embd_full;
const int64_t n_vocab = model.vocab.n_tokens();

// note: during encode, we always pass the full sequence starting from pos = 0
@@ -977,7 +977,7 @@ int llama_context::decode(const llama_batch & batch_inp) {
const auto & hparams = model.hparams;

const int64_t n_vocab = vocab.n_tokens();
const int64_t n_embd = hparams.n_embd;
const int64_t n_embd = hparams.n_embd_full;

// when computing embeddings, all tokens are output
const bool output_all = cparams.embeddings;
@@ -1276,7 +1276,7 @@ uint32_t llama_context::output_reserve(int32_t n_outputs) {

const auto n_batch = cparams.n_batch;
const auto n_vocab = vocab.n_tokens();
const auto n_embd = hparams.n_embd;
const auto n_embd = hparams.n_embd_full;

bool has_logits = true;
bool has_embd = cparams.embeddings;
@@ -1340,7 +1340,7 @@ uint32_t llama_context::output_reserve(int32_t n_outputs) {

void llama_context::output_reorder() {
const uint64_t n_vocab = model.vocab.n_tokens();
const uint64_t n_embd = model.hparams.n_embd;
const uint64_t n_embd = model.hparams.n_embd_full;

for (size_t s = 0; s < output_swaps.size(); ++s) {
const uint64_t i0 = output_swaps[s].i0;
@@ -1883,7 +1883,7 @@ size_t llama_context::state_write_data(llama_io_write_i & io) {
{
LLAMA_LOG_DEBUG("%s: - writing embeddings\n", __func__);

const uint64_t embd_size = std::min((uint64_t) this->embd_size, (uint64_t) n_outputs * model.hparams.n_embd);
const uint64_t embd_size = std::min((uint64_t) this->embd_size, (uint64_t) n_outputs * model.hparams.n_embd_full);

io.write(&embd_size, sizeof(embd_size));

@@ -2135,7 +2135,7 @@ void llama_context::opt_epoch_iter(
batch.logits [pos_batch] = true;
}

if (!balloc->init(batch, model.vocab, nullptr, model.hparams.n_embd, cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max, true)) {
if (!balloc->init(batch, model.vocab, nullptr, model.hparams.n_embd_full, cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max, true)) {
LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
return;
}
src/llama-graph.cpp: 2 additions & 2 deletions
@@ -1142,7 +1142,7 @@ ggml_tensor * llm_graph_context::build_moe_ffn(

// input embeddings with optional lora
ggml_tensor * llm_graph_context::build_inp_embd(ggml_tensor * tok_embd) const {
const int64_t n_embd = hparams.n_embd;
const int64_t n_embd = hparams.n_embd_full;

auto inp = std::make_unique<llm_graph_input_embd>();

@@ -1279,7 +1279,7 @@ ggml_tensor * llm_graph_context::build_inp_cross_embd() const {
// return cur;
//}

const auto n_embd = !cross->v_embd.empty() ? cross->n_embd : hparams.n_embd;
const auto n_embd = !cross->v_embd.empty() ? cross->n_embd : hparams.n_embd_full;
const auto n_enc = !cross->v_embd.empty() ? cross->n_enc : hparams.n_ctx_train;

cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_enc);
src/llama-hparams.h: 1 addition & 0 deletions
@@ -40,6 +40,7 @@ struct llama_hparams {

uint32_t n_ctx_train; // context size the model was trained on
uint32_t n_embd;
uint32_t n_embd_full; // main + auxiliary embeds
uint32_t n_embd_features = 0;
uint32_t n_layer;
int32_t n_layer_kv_from_start = -1; // if non-negative, the first n_layer_kv_from_start layers have KV cache
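How the two fields relate, condensed from the load_hparams changes further down: n_embd_full defaults to n_embd and is only widened for architectures that stack deepstack features along the feature dimension. The wrapper function below is illustrative; the real code sits inside a switch over the architecture:

```cpp
// Illustrative condensation of the load_hparams logic in this PR.
static void init_embd_widths(llama_hparams & hparams, llm_arch arch) {
    hparams.n_embd_full = hparams.n_embd; // default: full width == main width

    // Qwen3-VL (and its MoE variant) stack deepstack features on top of the
    // main embedding, so only the full width is multiplied.
    if (arch == LLM_ARCH_QWEN3VL || arch == LLM_ARCH_QWEN3VLMOE) {
        hparams.n_embd_full *= hparams.n_deepstack_layers + 1;
    }
}
```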
src/llama-model.cpp: 10 additions & 13 deletions
@@ -276,7 +276,7 @@ static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w
} break;
case GGML_OP_IM2COL:
{
const int n_embd = hparams.n_embd;
const int n_embd = hparams.n_embd_full;
ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, n_embd, w->ne[1], 1, 1);
op_tensor = ggml_im2col(ctx, w, b, 1, 0, 0, 0, 1, 0, false, GGML_TYPE_F16);
} break;
@@ -505,6 +505,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {
ml.get_key(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false);
ml.get_key(LLM_KV_EXPERT_GROUP_COUNT, hparams.n_expert_groups, false);
ml.get_key(LLM_KV_EXPERT_GROUP_USED_COUNT, hparams.n_group_used, false);
hparams.n_embd_full = hparams.n_embd;

if (arch == LLM_ARCH_WAVTOKENIZER_DEC) {
ml.get_key(LLM_KV_FEATURES_LENGTH, hparams.n_embd_features);
@@ -1041,7 +1042,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {
}
// since vision model stacks deepstack features along feature dim
// we also create a fake "n_embd" for text model to be the main embd + deepstack embds
hparams.n_embd *= hparams.n_deepstack_layers + 1;
hparams.n_embd_full *= hparams.n_deepstack_layers + 1;
} break;
case LLM_ARCH_QWEN3MOE:
{
@@ -1067,7 +1068,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {
}
// since vision model stacks deepstack features along feature dim
// we also create a fake "n_embd" for text model to be the main embd + deepstack embds
hparams.n_embd *= hparams.n_deepstack_layers + 1;
hparams.n_embd_full *= hparams.n_deepstack_layers + 1;
} break;
case LLM_ARCH_PHI2:
{
@@ -3332,10 +3333,6 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
case LLM_ARCH_QWEN3:
case LLM_ARCH_QWEN3VL:
{
// for model loading, the weights only have the main embd
// so we need to divide by the number of deepstack layers + 1
// n_embd is const int so we declare a new variable
int64_t n_embd = hparams.n_embd / (hparams.n_deepstack_layers + 1);
tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

// output
@@ -3371,10 +3368,6 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
case LLM_ARCH_QWEN3MOE:
case LLM_ARCH_QWEN3VLMOE:
{
// for model loading, the weights only have the main embd
// so we need to divide by the number of deepstack layers + 1
// n_embd is const int so we declare a new variable
int64_t n_embd = hparams.n_embd / (hparams.n_deepstack_layers + 1);
tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

// output
@@ -6681,8 +6674,8 @@ ggml_backend_buffer_type_t llama_model::select_buft(int il) const {
return ::select_buft(
*pimpl->dev_layer.at(il).buft_list,
[&](ggml_context * ctx) {
ggml_tensor * cur = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
ggml_tensor * layer_dir = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
ggml_tensor * cur = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd_full);
ggml_tensor * layer_dir = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd_full);
return ggml_add(ctx, cur, layer_dir);
});
}
@@ -7329,6 +7322,10 @@ int32_t llama_model_n_embd(const llama_model * model) {
return model->hparams.n_embd;
}

int32_t llama_model_n_embd_full(const llama_model * model) {
return model->hparams.n_embd_full;
}

int32_t llama_model_n_layer(const llama_model * model) {
return model->hparams.n_layer;
}
src/models/qwen3vl-moe.cpp: 1 addition & 2 deletions
@@ -1,9 +1,8 @@
#include "models.h"

llm_build_qwen3vlmoe::llm_build_qwen3vlmoe(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
const int64_t n_embd_full = hparams.n_embd; // main embd + deepstack embds
const size_t n_deepstack_layers = hparams.n_deepstack_layers;
const int64_t n_embd = n_embd_full / (n_deepstack_layers + 1);
const int64_t n_embd = hparams.n_embd;
const int64_t n_embd_head = hparams.n_embd_head_v;

GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
src/models/qwen3vl.cpp: 1 addition & 4 deletions
@@ -1,13 +1,10 @@
#include "models.h"

llm_build_qwen3vl::llm_build_qwen3vl(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {

const int64_t n_embd_full = hparams.n_embd; // main embd + deepstack embds
const size_t n_deepstack_layers = hparams.n_deepstack_layers;
const int64_t n_embd = n_embd_full / (n_deepstack_layers + 1);
const int64_t n_embd = hparams.n_embd;
const int64_t n_embd_head = hparams.n_embd_head_v;


GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
GGML_ASSERT(n_embd_head == hparams.n_rot);

tools/mtmd/mtmd.cpp: 1 addition & 1 deletion
@@ -151,7 +151,7 @@ struct mtmd_context {
print_timings(ctx_params.print_timings),
n_threads (ctx_params.n_threads),
media_marker (ctx_params.media_marker),
n_embd_text (llama_model_n_embd(text_model))
n_embd_text (llama_model_n_embd_full(text_model))
{
if (std::string(ctx_params.image_marker) != MTMD_DEFAULT_IMAGE_MARKER) {
throw std::runtime_error("custom image_marker is not supported anymore, use media_marker instead");