Commit a35f501

Merge branch 'master' into llama-bench-tensor-override

2 parents 405224a + 12b1750
82 files changed, +7008 -5606 lines changed


.github/workflows/build.yml

Lines changed: 3 additions & 2 deletions
@@ -1766,16 +1766,17 @@ jobs:
     if: ${{ github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'Ascend NPU') }}
     defaults:
       run:
-        shell: bash -el {0}
-    runs-on: ubuntu-24.04-arm
+        shell: bash -el {0}
     strategy:
       matrix:
+        arch: [x86, aarch64]
         cann:
           - '8.1.RC1.alpha001-910b-openeuler22.03-py3.10'
         device:
           - 'ascend910b3'
         build:
           - 'Release'
+    runs-on: ${{ matrix.arch == 'aarch64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }}
     container: ascendai/cann:${{ matrix.cann }}
     steps:
       - name: Checkout

Makefile

Lines changed: 0 additions & 4 deletions
@@ -780,10 +780,6 @@ ifdef GGML_HIP

     MK_CPPFLAGS += -DGGML_USE_HIP -DGGML_USE_CUDA

-ifdef GGML_HIP_UMA
-    MK_CPPFLAGS += -DGGML_HIP_UMA
-endif # GGML_HIP_UMA
-
     MK_LDFLAGS += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib
     MK_LDFLAGS += -L$(ROCM_PATH)/lib64 -Wl,-rpath=$(ROCM_PATH)/lib64
     MK_LDFLAGS += -lhipblas -lamdhip64 -lrocblas

convert_hf_to_gguf.py

Lines changed: 31 additions & 2 deletions
@@ -4422,6 +4422,10 @@ def set_vocab(self):
         self._set_vocab_gpt2()

     def set_gguf_parameters(self):
+
+        # note: deepseek2 using MLA converts into MQA (ie: GQA with 1 group)
+        self.hparams["num_key_value_heads"] = 1
+
         super().set_gguf_parameters()
         hparams = self.hparams

@@ -4430,8 +4434,13 @@ def set_gguf_parameters(self):
         if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
             self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
         self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
-        self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
-        self.gguf_writer.add_value_length(hparams["v_head_dim"])
+
+        # note: deepseek2 using MLA converts into MQA with larger heads, then decompresses to MHA
+        self.gguf_writer.add_key_length(hparams["kv_lora_rank"] + hparams["qk_rope_head_dim"])
+        self.gguf_writer.add_value_length(hparams["kv_lora_rank"])
+        self.gguf_writer.add_key_length_mla(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
+        self.gguf_writer.add_value_length_mla(hparams["v_head_dim"])
+
         self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
         self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
         self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])
@@ -4500,6 +4509,26 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
         else:
             return []

+        # note: MLA with the absorption optimization, needs these two split and k_b_proj transposed
+        if name.endswith("kv_b_proj.weight"):
+            name_kb = name.replace("kv_b_proj", "k_b_proj")
+            name_vb = name.replace("kv_b_proj", "v_b_proj")
+
+            n_head_kv = self.hparams["num_key_value_heads"]
+            v_head_dim = self.hparams["v_head_dim"]
+            qk_nope_head_dim = self.hparams["qk_nope_head_dim"]
+
+            assert data_torch.shape[0] == n_head_kv * (v_head_dim + qk_nope_head_dim)
+
+            kv_b = data_torch.view(n_head_kv, v_head_dim + qk_nope_head_dim, data_torch.shape[-1])
+            k_b, v_b = torch.split(kv_b, [qk_nope_head_dim, v_head_dim], dim=1)
+            k_b = k_b.transpose(1, 2)
+
+            return [
+                (self.map_tensor_name(name_kb), k_b),
+                (self.map_tensor_name(name_vb), v_b)
+            ]
+
         return [(self.map_tensor_name(name), data_torch)]

     def prepare_tensors(self):

docs/build.md

Lines changed: 4 additions & 2 deletions
@@ -259,8 +259,6 @@ You can download it from your Linux distro's package manager or from here: [ROCm
     cmake -S . -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \
         && cmake --build build --config Release -- -j 16
     ```
-On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DGGML_HIP_UMA=ON`.
-However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs).

 To enhance flash attention performance on RDNA3+ or CDNA architectures, you can utilize the rocWMMA library by enabling the `-DGGML_HIP_ROCWMMA_FATTN=ON` option. This requires rocWMMA headers to be installed on the build system.

@@ -296,6 +294,10 @@ You can download it from your Linux distro's package manager or from here: [ROCm
 The environment variable [`HIP_VISIBLE_DEVICES`](https://rocm.docs.amd.com/en/latest/understand/gpu_isolation.html#hip-visible-devices) can be used to specify which GPU(s) will be used.
 If your GPU is not officially supported you can use the environment variable [`HSA_OVERRIDE_GFX_VERSION`] set to a similar GPU, for example 10.3.0 on RDNA2 (e.g. gfx1030, gfx1031, or gfx1035) or 11.0.0 on RDNA3.

+### Unified Memory
+
+On Linux it is possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting environment variable `GGML_CUDA_ENABLE_UNIFIED_MEMORY=1`. However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs).
+
 ## Vulkan

 **Windows**
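For completeness, one way to set the documented environment variable from a launcher script is sketched below. This is only an illustration; the binary and model paths are hypothetical placeholders, not anything defined by this commit:

    import os
    import subprocess

    # run a llama.cpp binary with unified memory enabled (paths are placeholders)
    env = dict(os.environ, GGML_CUDA_ENABLE_UNIFIED_MEMORY="1")
    subprocess.run(
        ["./build/bin/llama-cli", "-m", "models/model.gguf", "-p", "Hello"],
        env=env,
        check=True,
    )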

examples/llava/gemma3-cli.cpp

Lines changed: 1 addition & 1 deletion
@@ -317,6 +317,6 @@ int main(int argc, char ** argv) {
             is_first_msg = false;
         }
     }
-
+    llama_perf_context_print(ctx.lctx);
     return 0;
 }

examples/quantize/quantize.cpp

Lines changed: 115 additions & 2 deletions
@@ -9,14 +9,15 @@
 #include <fstream>
 #include <cmath>
 #include <cctype>
+#include <algorithm>

 struct quant_option {
     std::string name;
     llama_ftype ftype;
     std::string desc;
 };

-static const std::vector<struct quant_option> QUANT_OPTIONS = {
+static const std::vector<quant_option> QUANT_OPTIONS = {
     { "Q4_0", LLAMA_FTYPE_MOSTLY_Q4_0, " 4.34G, +0.4685 ppl @ Llama-3-8B", },
     { "Q4_1", LLAMA_FTYPE_MOSTLY_Q4_1, " 4.78G, +0.4511 ppl @ Llama-3-8B", },
     { "Q5_0", LLAMA_FTYPE_MOSTLY_Q5_0, " 5.21G, +0.1316 ppl @ Llama-3-8B", },
@@ -105,7 +106,8 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
 //
 [[noreturn]]
 static void usage(const char * executable) {
-    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
+    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type]\n", executable);
+    printf("       [--token-embedding-type] [--tensor-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n");
     printf("  --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
     printf("  --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
     printf("  --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
@@ -114,6 +116,8 @@ static void usage(const char * executable) {
     printf("  --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n");
     printf("  --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor\n");
     printf("  --token-embedding-type ggml_type: use this ggml_type for the token embeddings tensor\n");
+    printf("  --tensor-type TENSOR=TYPE: quantize this tensor to this ggml_type. example: --tensor-type attn_q=q8_0\n");
+    printf("      Advanced option to selectively quantize tensors. May be specified multiple times.\n");
     printf("  --keep-split: will generate quantized model in the same shards as input\n");
     printf("  --override-kv KEY=TYPE:VALUE\n");
     printf("      Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
@@ -244,6 +248,107 @@ static ggml_type parse_ggml_type(const char * arg) {
     return GGML_TYPE_COUNT;
 }

+// Allowed tensors for arbitrary quantization with --tensor-type option
+static const std::vector<std::string> ALLOWED_TENSOR_TYPE = {
+    "attn_k",
+    "attn_kv_a_mqa",
+    "attn_kv_b",
+    "attn_o",
+    "attn_output",
+    "attn_q",
+    "attn_q_a",
+    "attn_q_b",
+    "attn_qkv",
+    "attn_v",
+    "channel_mix_key",
+    "channel_mix_receptance",
+    "channel_mix_value",
+    "cls",
+    "cls.output",
+    "cross_attn_k",
+    "cross_attn_o",
+    "cross_attn_q",
+    "cross_attn_v",
+    "ffn_act",
+    "ffn_down",
+    "ffn_down_exps",
+    "ffn_down_shexp",
+    "ffn_gate",
+    "ffn_gate_exps",
+    "ffn_gate_shexp",
+    "ffn_up",
+    "ffn_up_exps",
+    "ffn_up_shexp",
+    "ssm_in",
+    "ssm_out",
+    "time_mix_gate",
+    "time_mix_key",
+    "time_mix_output",
+    "time_mix_receptance",
+    "time_mix_value",
+};
+
+// changes to this struct must be replicated in llama-quant.cpp
+struct tensor_quantization {
+    std::string name;
+    ggml_type quant = GGML_TYPE_COUNT;
+};
+
+static bool parse_tensor_type(const char * data, std::vector<tensor_quantization> & tensor_type) {
+    const char * sep = strchr(data, '=');
+    if (sep == nullptr) {
+        printf("\n%s: malformed tensor type '%s'\n\n", __func__, data);
+        return false;
+    }
+
+    const size_t tn_len = sep - data;
+    if (tn_len == 0) {
+        printf("\n%s: missing tensor name\n\n", __func__);
+        return false;
+    }
+
+    if (const size_t qt_len = strlen(sep); qt_len == 1) {
+        printf("\n%s: missing quantization type\n\n", __func__);
+        return false;
+    }
+
+    std::string tn(data, tn_len);
+    std::transform(tn.begin(), tn.end(), tn.begin(), tolower);
+    sep++;
+    const std::string qt(sep);
+
+    bool found = false;
+    for (const auto & allowed : ALLOWED_TENSOR_TYPE) {
+        std::string tensor;
+        tensor = tn.rfind('.') != std::string::npos ? tn.substr(tn.rfind('.') + 1) : tn;
+        // handle special case of cls.output
+        std::string cls_output = "cls.output";
+        if (tn.find(cls_output) != std::string::npos) {
+            tensor = "cls.output";
+        }
+        // check if an allowed tensor exists and it's at the end of the kv string
+        if (tensor == allowed) {
+            found = true;
+            break;
+        }
+    }
+    if (!found) {
+        printf("\n%s: invalid tensor name '%s'\n\n", __func__, tn.c_str());
+        return false;
+    }
+
+    if (parse_ggml_type(qt.c_str()) == GGML_TYPE_COUNT) {
+        printf("\n%s: invalid quantization type '%s'\n\n", __func__, qt.c_str());
+        return false;
+    }
+
+    tensor_quantization tqz;
+    tqz.name = tn;
+    tqz.quant = parse_ggml_type(qt.c_str());
+    tensor_type.emplace_back(std::move(tqz));
+    return true;
+}
+
 int main(int argc, char ** argv) {
     if (argc < 3) {
         usage(argv[0]);
@@ -255,6 +360,7 @@ int main(int argc, char ** argv) {
     std::string imatrix_file;
     std::vector<std::string> included_weights, excluded_weights;
     std::vector<llama_model_kv_override> kv_overrides;
+    std::vector<tensor_quantization> tensor_types;

     for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
         if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) {
@@ -277,6 +383,10 @@ int main(int argc, char ** argv) {
             } else {
                 usage(argv[0]);
             }
+        } else if (strcmp(argv[arg_idx], "--tensor-type") == 0) {
+            if (arg_idx == argc-1 || !parse_tensor_type(argv[++arg_idx], tensor_types)) {
+                usage(argv[0]);
+            }
         } else if (strcmp(argv[arg_idx], "--override-kv") == 0) {
             if (arg_idx == argc-1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) {
                 usage(argv[0]);
@@ -361,6 +471,9 @@ int main(int argc, char ** argv) {
         kv_overrides.back().key[0] = 0;
         params.kv_overrides = &kv_overrides;
     }
+    if (!tensor_types.empty()) {
+        params.tensor_types = &tensor_types;
+    }

     llama_backend_init();
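For readers who want to pre-validate --tensor-type arguments from a wrapper script, here is a rough Python transcription of the parse_tensor_type() logic above. The allowed names follow the C++ list in the diff; the ggml quantization-type check is left out, since the real tool still performs it:

    # rough Python equivalent of parse_tensor_type() for pre-checking arguments
    ALLOWED_TENSOR_TYPE = {
        "attn_k", "attn_kv_a_mqa", "attn_kv_b", "attn_o", "attn_output",
        "attn_q", "attn_q_a", "attn_q_b", "attn_qkv", "attn_v",
        "channel_mix_key", "channel_mix_receptance", "channel_mix_value",
        "cls", "cls.output",
        "cross_attn_k", "cross_attn_o", "cross_attn_q", "cross_attn_v",
        "ffn_act", "ffn_down", "ffn_down_exps", "ffn_down_shexp",
        "ffn_gate", "ffn_gate_exps", "ffn_gate_shexp",
        "ffn_up", "ffn_up_exps", "ffn_up_shexp",
        "ssm_in", "ssm_out",
        "time_mix_gate", "time_mix_key", "time_mix_output",
        "time_mix_receptance", "time_mix_value",
    }

    def check_tensor_type(arg: str) -> tuple[str, str]:
        """Validate a TENSOR=TYPE argument the way quantize.cpp does (type check omitted)."""
        name, sep, qtype = arg.partition("=")
        if not sep or not name or not qtype:
            raise ValueError(f"malformed tensor type '{arg}'")
        name = name.lower()
        # special case cls.output; otherwise match the part after the last '.'
        tensor = "cls.output" if "cls.output" in name else name.rsplit(".", 1)[-1]
        if tensor not in ALLOWED_TENSOR_TYPE:
            raise ValueError(f"invalid tensor name '{name}'")
        return name, qtype

    print(check_tensor_type("attn_q=q8_0"))  # ('attn_q', 'q8_0')

On the command line this corresponds to passing something like --tensor-type attn_q=q8_0 to the quantize tool, as shown in its usage text above.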

examples/sycl/build.sh

Lines changed: 2 additions & 2 deletions
@@ -8,10 +8,10 @@ cd build
 source /opt/intel/oneapi/setvars.sh

 #for FP16
-#cmake .. -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON # faster for long-prompt inference
+#cmake .. -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON -DLLAMA_CURL=OFF # faster for long-prompt inference

 #for FP32
-cmake .. -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
+cmake .. -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=OFF

 #build example/main
 #cmake --build . --config Release --target main

ggml/CMakeLists.txt

Lines changed: 0 additions & 1 deletion
@@ -170,7 +170,6 @@ option(GGML_HIP "ggml: use HIP"
 option(GGML_HIP_GRAPHS "ggml: use HIP graph, experimental, slow" OFF)
 option(GGML_HIP_NO_VMM "ggml: do not try to use HIP VMM" ON)
 option(GGML_HIP_ROCWMMA_FATTN "ggml: enable rocWMMA for FlashAttention" OFF)
-option(GGML_HIP_UMA "ggml: use HIP unified memory architecture" OFF)
 option(GGML_VULKAN "ggml: use Vulkan" OFF)
 option(GGML_VULKAN_CHECK_RESULTS "ggml: run Vulkan op checks" OFF)
 option(GGML_VULKAN_DEBUG "ggml: enable Vulkan debug output" OFF)
