
Commit d712e0a

Merge branch 'ggml-org:master' into ug/port-sweep-bench
2 parents: c3c5aaa + 19f4dec

File tree (4 files changed: +54 additions, −16 deletions)

convert_hf_to_gguf.py
ggml/src/ggml-vulkan/ggml-vulkan.cpp
ggml/src/ggml-vulkan/vulkan-shaders/sqrt.comp
ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp

convert_hf_to_gguf.py

Lines changed: 12 additions & 16 deletions
```diff
@@ -1334,6 +1334,12 @@ def _find_param(self, obj: dict[str, Any], keys: Iterable[str], optional: bool =
             return None
         raise KeyError(f"could not find any of: {keys}")
 
+    def tensor_force_quant(self, name, new_name, bid, n_dims):
+        del bid, name, n_dims  # unused
+        if ".patch_embd.weight" in new_name:
+            return gguf.GGMLQuantizationType.F16 if self.ftype == gguf.LlamaFileType.MOSTLY_F16 else gguf.GGMLQuantizationType.F32
+        return False
+
 
 @ModelBase.register("GPTNeoXForCausalLM")
 class GPTNeoXModel(TextModel):
@@ -2305,10 +2311,9 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_vision_use_gelu(True)
 
     def tensor_force_quant(self, name, new_name, bid, n_dims):
-        del bid, new_name, n_dims  # unused
         if ".embeddings." in name:
             return gguf.GGMLQuantizationType.F32
-        return False
+        return super().tensor_force_quant(name, new_name, bid, n_dims)
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid  # unused
@@ -3296,12 +3301,9 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_vision_attention_layernorm_eps(self.global_config.get("rms_norm_eps", 1e-6))
 
     def tensor_force_quant(self, name, new_name, bid, n_dims):
-        del bid, name, n_dims  # unused
-        if ".patch_embd." in new_name:
-            return gguf.GGMLQuantizationType.F16
         if ".position_embd." in new_name:
             return gguf.GGMLQuantizationType.F32
-        return False
+        return super().tensor_force_quant(name, new_name, bid, n_dims)
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid  # unused
@@ -3374,10 +3376,9 @@ def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
         yield ("audio_tower.embed_positions.weight", pos_embd)
 
     def tensor_force_quant(self, name, new_name, bid, n_dims):
-        del bid, new_name, n_dims  # unused
         if ".conv" in name and ".weight" in name:
             return gguf.GGMLQuantizationType.F16
-        return False
+        return super().tensor_force_quant(name, new_name, bid, n_dims)
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         if name.startswith("thinker."):
@@ -3423,12 +3424,9 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_vision_projector_scale_factor(int(1.0 / downsample_ratio))
 
     def tensor_force_quant(self, name, new_name, bid, n_dims):
-        del bid, name, n_dims  # unused
-        if ".patch_embd." in new_name:
-            return gguf.GGMLQuantizationType.F16
         if ".position_embd." in new_name:
             return gguf.GGMLQuantizationType.F32
-        return False
+        return super().tensor_force_quant(name, new_name, bid, n_dims)
 
     def _mapping_interns1_name(self, name):
         names_map = {
@@ -5062,13 +5060,12 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_vision_projector_scale_factor(proj_scale_factor)
 
     def tensor_force_quant(self, name, new_name, bid, n_dims):
-        del bid, new_name, n_dims  # unused
         # related to https://github.com/ggml-org/llama.cpp/issues/13025
         if "input_projection" in name:
             return gguf.GGMLQuantizationType.F16
         if ".embeddings." in name:
             return gguf.GGMLQuantizationType.F32
-        return False
+        return super().tensor_force_quant(name, new_name, bid, n_dims)
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid  # unused
@@ -7727,10 +7724,9 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_audio_attention_layernorm_eps(self.hparams.get("layer_norm_eps", 1e-5))
 
     def tensor_force_quant(self, name, new_name, bid, n_dims):
-        del bid, new_name, n_dims  # unused
         if ".conv" in name and ".weight" in name:
             return gguf.GGMLQuantizationType.F16
-        return False
+        return super().tensor_force_quant(name, new_name, bid, n_dims)
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid  # unused
```
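The net effect of this file's changes: `ModelBase` gains a default `tensor_force_quant` that pins vision patch embeddings (`.patch_embd.weight`) to F16 or F32 depending on the requested file type, and each multimodal converter keeps only its model-specific rules, ending with `super().tensor_force_quant(...)` instead of `return False` so the shared default still applies. A minimal sketch of the pattern (the class names below are simplified stand-ins, not the real converter classes):

```python
import gguf


class ModelBase:
    ftype = gguf.LlamaFileType.MOSTLY_F16

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        # Shared default: patch embeddings stay at full/half precision
        # instead of being quantized with the rest of the model.
        del bid, name, n_dims  # unused
        if ".patch_embd.weight" in new_name:
            return (gguf.GGMLQuantizationType.F16
                    if self.ftype == gguf.LlamaFileType.MOSTLY_F16
                    else gguf.GGMLQuantizationType.F32)
        return False  # no override: quantize as requested


class SomeVisionModel(ModelBase):
    def tensor_force_quant(self, name, new_name, bid, n_dims):
        # Model-specific rule first ...
        if ".position_embd." in new_name:
            return gguf.GGMLQuantizationType.F32
        # ... then delegate, so the base patch-embedding rule still fires.
        return super().tensor_force_quant(name, new_name, bid, n_dims)
```

Besides removing the duplicated `.patch_embd.` checks, this also makes the forced type follow the target file type: previously several converters forced patch embeddings to F16 unconditionally, even for F32 conversions.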

ggml/src/ggml-vulkan/ggml-vulkan.cpp

Lines changed: 23 additions & 0 deletions
```diff
@@ -464,6 +464,7 @@ struct vk_device_struct {
     vk_pipeline pipeline_upscale_nearest_f32, pipeline_upscale_bilinear_f32, pipeline_upscale_bilinear_ac_f32;
     vk_pipeline pipeline_scale_f32;
     vk_pipeline pipeline_sqr_f32;
+    vk_pipeline pipeline_sqrt_f32;
     vk_pipeline pipeline_sin_f32;
     vk_pipeline pipeline_cos_f32;
     vk_pipeline pipeline_clamp_f32;
@@ -3031,6 +3032,7 @@ static void ggml_vk_load_shaders(vk_device& device) {
     ggml_vk_create_pipeline(device, device->pipeline_scale_f32, "scale_f32", scale_f32_len, scale_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
 
     ggml_vk_create_pipeline(device, device->pipeline_sqr_f32, "sqr_f32", sqr_f32_len, sqr_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_sqrt_f32, "sqrt_f32", sqrt_f32_len, sqrt_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_sin_f32, "sin_f32", sin_f32_len, sin_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_cos_f32, "cos_f32", cos_f32_len, cos_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
 
@@ -6981,6 +6983,11 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
             return ctx->device->pipeline_sqr_f32;
         }
         return nullptr;
+    case GGML_OP_SQRT:
+        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+            return ctx->device->pipeline_sqrt_f32;
+        }
+        return nullptr;
     case GGML_OP_SIN:
         if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
             return ctx->device->pipeline_sin_f32;
@@ -7290,6 +7297,7 @@ static bool ggml_vk_op_supports_incontiguous(ggml_op op) {
     case GGML_OP_CONCAT:
     case GGML_OP_UPSCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
@@ -7595,6 +7603,7 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
     case GGML_OP_MUL:
     case GGML_OP_SCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
@@ -8242,6 +8251,10 @@ static void ggml_vk_sqr(ggml_backend_vk_context * ctx, vk_context& subctx, const
     ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SQR, vk_op_unary_push_constants_init(src0, dst), dryrun);
 }
 
+static void ggml_vk_sqrt(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SQRT, vk_op_unary_push_constants_init(src0, dst), dryrun);
+}
+
 static void ggml_vk_sin(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
     ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SIN, vk_op_unary_push_constants_init(src0, dst), dryrun);
 }
@@ -9697,6 +9710,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_UPSCALE:
     case GGML_OP_SCALE:
    case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
@@ -9766,6 +9780,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_UPSCALE:
     case GGML_OP_SCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
@@ -9867,6 +9882,10 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_SQR:
         ggml_vk_sqr(ctx, compute_ctx, src0, node, dryrun);
 
+        break;
+    case GGML_OP_SQRT:
+        ggml_vk_sqrt(ctx, compute_ctx, src0, node, dryrun);
+
         break;
     case GGML_OP_SIN:
         ggml_vk_sin(ctx, compute_ctx, src0, node, dryrun);
@@ -10118,6 +10137,7 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph *
     case GGML_OP_UPSCALE:
     case GGML_OP_SCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
@@ -11357,6 +11377,7 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
     case GGML_OP_SILU_BACK:
     case GGML_OP_RMS_NORM_BACK:
    case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
@@ -11801,6 +11822,8 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph *
         tensor_clone = ggml_scale_bias(ggml_ctx, src_clone[0], params[0], params[1]);
     } else if (tensor->op == GGML_OP_SQR) {
         tensor_clone = ggml_sqr(ggml_ctx, src_clone[0]);
+    } else if (tensor->op == GGML_OP_SQRT) {
+        tensor_clone = ggml_sqrt(ggml_ctx, src_clone[0]);
     } else if (tensor->op == GGML_OP_SIN) {
         tensor_clone = ggml_sin(ggml_ctx, src_clone[0]);
     } else if (tensor->op == GGML_OP_COS) {
```
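Taken together, these hunks wire `GGML_OP_SQRT` through the same path the other elementwise ops already take: a pipeline handle in `vk_device_struct`, pipeline creation in `ggml_vk_load_shaders`, pipeline lookup in `ggml_vk_op_get_pipeline`, the incontiguous-support and unary push-constant lists, dispatch in `ggml_vk_build_graph`, the `supports_op` whitelist, and the debug cross-check against a CPU reference in `ggml_vk_check_results_0`. A hedged sketch of how the op is reached from the public ggml API (standard ggml-backend calls, error handling omitted, assuming a Vulkan-enabled build):

```cpp
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "ggml-vulkan.h"

int main() {
    // Metadata-only context: tensor data will live in a backend buffer.
    ggml_init_params ip = {
        /*.mem_size   =*/ ggml_tensor_overhead() * 8 + ggml_graph_overhead(),
        /*.mem_buffer =*/ nullptr,
        /*.no_alloc   =*/ true,
    };
    ggml_context * ctx = ggml_init(ip);

    ggml_tensor * a   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);
    ggml_tensor * out = ggml_sqrt(ctx, a);            // creates a GGML_OP_SQRT node

    ggml_backend_t backend = ggml_backend_vk_init(0); // first Vulkan device
    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);

    float src[1024];
    for (float & v : src) v = 2.0f;
    ggml_backend_tensor_set(a, src, 0, sizeof(src));

    ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, out);
    ggml_backend_graph_compute(backend, gf);          // dispatches pipeline_sqrt_f32

    float dst[1024];
    ggml_backend_tensor_get(out, dst, 0, sizeof(dst)); // each dst[i] == sqrtf(2.0f)

    ggml_backend_buffer_free(buf);
    ggml_backend_free(backend);
    ggml_free(ctx);
}
```

In the llama.cpp tree, `test-backend-ops` is the usual way to validate a newly wired op: it compares the Vulkan result against the CPU backend, the same comparison the `ggml_vk_check_results_0` hunk above enables for debug runs.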
ggml/src/ggml-vulkan/vulkan-shaders/sqrt.comp

Lines changed: 17 additions & 0 deletions

```diff
@@ -0,0 +1,17 @@
+#version 450
+
+#include "types.comp"
+#include "generic_unary_head.comp"
+
+layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
+
+void main() {
+    const uint idx = get_idx();
+
+    if (idx >= p.ne) {
+        return;
+    }
+
+    const FLOAT_TYPE val = FLOAT_TYPE(data_a[get_aoffset() + src0_idx(idx)]);
+    data_d[get_doffset() + dst_idx(idx)] = D_TYPE(sqrt(val));
+}
```
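The new shader is the standard generic-unary skeleton: `generic_unary_head.comp` supplies the push constants `p` (including `p.ne`, the total element count) and the `get_idx`/`src0_idx`/`dst_idx` helpers that map a flat thread index to possibly non-contiguous source and destination offsets, which is what lets `GGML_OP_SQRT` join the `ggml_vk_op_supports_incontiguous` list above. Only the final expression differs between these unary shaders; the existing squaring shader presumably ends with the analogous line (a hedged reconstruction, not the verbatim file):

```glsl
// square.comp (sketch): same skeleton, different final expression
data_d[get_doffset() + dst_idx(idx)] = D_TYPE(val * val);
```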

ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp

Lines changed: 2 additions & 0 deletions
```diff
@@ -566,6 +566,8 @@ void process_shaders() {
 
     string_to_spv("sqr_f32", "square.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
 
+    string_to_spv("sqrt_f32", "sqrt.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+
     string_to_spv("sin_f32", "sin.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
 
     string_to_spv("cos_f32", "cos.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
```
