Commit 82137e1

Merge branch 'upstream' into concedo_experimental
# Conflicts:
#	.github/workflows/update-ops-docs.yml
#	ggml/src/CMakeLists.txt
#	ggml/src/ggml-cpu/CMakeLists.txt

2 parents: 86b9445 + 0398752

3 files changed (+34 -2 lines)

embd_res/klite.embd

Lines changed: 28 additions & 0 deletions
@@ -5481,6 +5481,34 @@ Current version indicated by LITEVER below.
     && String.fromCharCode(x.data[0]) == 'c')
     && String.fromCharCode(x.data[4]) == 'a');
 
+    if(!found || found.length==0)
+    {
+        let found2 = chunks.filter(x => (
+        x.name == "tEXt"
+        && x.data.length > 6
+        && String.fromCharCode(x.data[0]) == 'c')
+        && String.fromCharCode(x.data[1]) == 'c'
+        && String.fromCharCode(x.data[2]) == 'v'
+        && String.fromCharCode(x.data[3]) == '3'); //its a stupid new v3 card
+
+        if(found2 && found2.length>0)
+        {
+            try {
+                let b64buf = "";
+                let bytes = found2[0].data; //skip the chara
+                for (var i = 5; i < bytes.length; i++) {
+                    b64buf += String.fromCharCode(bytes[i]);
+                }
+                var decoded = JSON.parse(b64_decode_unicode(b64buf));
+                console.log(decoded);
+                return decoded;
+            } catch (e) {
+                console.log("Error decoding b64 in image: " + e);
+                return null;
+            }
+        }
+    }
+
     //remove ext asset
     found = found.filter(x => (
         x.data.length > 12

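The added fallback handles V3 character cards: when no "chara" tEXt chunk is found, it looks for a tEXt chunk whose first bytes spell "ccv3", skips the keyword and its null separator (indices 0-4), rebuilds the remaining bytes into a base64 string, and JSON-parses the decoded result. Below is a minimal standalone sketch of that decode step, assuming a chunk object shaped like the embed's PNG parser output (a name plus a data byte array); decodeCcv3Chunk is a hypothetical name, and atob/TextDecoder stand in for the embed's own b64_decode_unicode helper.

    // Hypothetical helper mirroring the ccv3 fallback; chunk.data is assumed to hold
    // the tEXt payload: the keyword "ccv3", a null separator, then base64-encoded JSON.
    function decodeCcv3Chunk(chunk) {
        if (chunk.name !== "tEXt" || chunk.data.length <= 6) {
            return null;
        }
        const keyword = String.fromCharCode(chunk.data[0], chunk.data[1], chunk.data[2], chunk.data[3]);
        if (keyword !== "ccv3") {
            return null;
        }
        let b64buf = "";
        for (let i = 5; i < chunk.data.length; i++) { // skip "ccv3" plus the null separator
            b64buf += String.fromCharCode(chunk.data[i]);
        }
        try {
            // atob yields one char per byte; re-decode those bytes as UTF-8 so non-ASCII
            // card text survives, standing in for the b64_decode_unicode helper used above.
            const bytes = Uint8Array.from(atob(b64buf), c => c.charCodeAt(0));
            return JSON.parse(new TextDecoder("utf-8").decode(bytes));
        } catch (e) {
            console.log("Error decoding b64 in image: " + e);
            return null;
        }
    }

With the parsed chunk list in hand, chunks.map(decodeCcv3Chunk).find(c => c !== null) would surface the first decodable V3 card, mirroring the found2[0] selection in the diff.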
src/llama-model.cpp

Lines changed: 5 additions & 2 deletions
@@ -119,6 +119,7 @@ const char * llm_type_name(llm_type type) {
         case LLM_TYPE_17B_16E:  return "17Bx16E (Scout)";
         case LLM_TYPE_17B_128E: return "17Bx128E (Maverick)";
         case LLM_TYPE_A13B:     return "A13B";
+        case LLM_TYPE_7B_A1B:   return "7B.A1B";
         case LLM_TYPE_8B_A1B:   return "8B.A1B";
         case LLM_TYPE_21B_A3B:  return "21B.A3B";
         case LLM_TYPE_30B_A3B:  return "30B.A3B";
@@ -1848,8 +1849,10 @@ void llama_model::load_hparams(llama_model_loader & ml) {
 
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
 
-                switch (hparams.n_layer) {
-                    // TODO: Add llm type label (not sure this is useful)
+                switch (hparams.n_embd) {
+                    case 1536: type = LLM_TYPE_7B_A1B; break;
+                    case 2048: case 2560: type = LLM_TYPE_3B; break;
+                    case 4096: type = LLM_TYPE_32B; break;
                     default: type = LLM_TYPE_UNKNOWN;
                 }
 

src/llama-model.h

Lines changed: 1 addition & 0 deletions
@@ -107,6 +107,7 @@ enum llm_type {
     LLM_TYPE_17B_16E,  // llama4 Scout
     LLM_TYPE_17B_128E, // llama4 Maverick
     LLM_TYPE_A13B,
+    LLM_TYPE_7B_A1B,
     LLM_TYPE_8B_A1B,   // lfm2moe
     LLM_TYPE_21B_A3B,  // Ernie MoE small
     LLM_TYPE_30B_A3B,
