
Commit abd0737

Merge branch 'ggml-org:master' into n_past_max
2 parents caf9dc7 + 19f4dec

8 files changed, 109 additions and 60 deletions

.github/workflows/build.yml

Lines changed: 2 additions & 1 deletion
@@ -1070,7 +1070,8 @@ jobs:
         write-host "Downloading AMD HIP SDK Installer"
         Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
         write-host "Installing AMD HIP SDK"
-        Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
+        $proc = Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -PassThru
+        $proc.WaitForExit(600000)
         write-host "Completed AMD HIP SDK installation"
 
     - name: Verify ROCm

.github/workflows/release.yml

Lines changed: 2 additions & 1 deletion
@@ -557,7 +557,8 @@ jobs:
         write-host "Downloading AMD HIP SDK Installer"
         Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
         write-host "Installing AMD HIP SDK"
-        Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
+        $proc = Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -PassThru
+        $proc.WaitForExit(600000)
         write-host "Completed AMD HIP SDK installation"
 
     - name: Verify ROCm
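
Both workflow changes are the same fix: the AMD HIP SDK installer used to be launched with Start-Process ... -Wait, which blocks indefinitely if the installer hangs. It is now launched with -PassThru and waited on via $proc.WaitForExit(600000), so the wait is capped at 600,000 ms (10 minutes) and the job moves on to the Verify ROCm step either way. A minimal sketch of the same bounded-wait pattern, written in Python purely as an illustration (the real step stays in PowerShell, and the installer path here is hypothetical):

import subprocess

# Start the installer without blocking on it, then wait with an upper bound,
# mirroring Start-Process -PassThru followed by $proc.WaitForExit(600000).
proc = subprocess.Popen(["rocm-install.exe", "-install"])
try:
    proc.wait(timeout=600)  # at most 10 minutes
except subprocess.TimeoutExpired:
    # keep going; a later step checks whether ROCm actually installed
    print("installer still running after 10 minutes; continuing")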

convert_hf_to_gguf.py

Lines changed: 12 additions & 16 deletions
@@ -1334,6 +1334,12 @@ def _find_param(self, obj: dict[str, Any], keys: Iterable[str], optional: bool =
             return None
         raise KeyError(f"could not find any of: {keys}")
 
+    def tensor_force_quant(self, name, new_name, bid, n_dims):
+        del bid, name, n_dims # unused
+        if ".patch_embd.weight" in new_name:
+            return gguf.GGMLQuantizationType.F16 if self.ftype == gguf.LlamaFileType.MOSTLY_F16 else gguf.GGMLQuantizationType.F32
+        return False
+
 
 @ModelBase.register("GPTNeoXForCausalLM")
 class GPTNeoXModel(TextModel):

@@ -2305,10 +2311,9 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_vision_use_gelu(True)
 
     def tensor_force_quant(self, name, new_name, bid, n_dims):
-        del bid, new_name, n_dims # unused
         if ".embeddings." in name:
             return gguf.GGMLQuantizationType.F32
-        return False
+        return super().tensor_force_quant(name, new_name, bid, n_dims)
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid # unused

@@ -3296,12 +3301,9 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_vision_attention_layernorm_eps(self.global_config.get("rms_norm_eps", 1e-6))
 
     def tensor_force_quant(self, name, new_name, bid, n_dims):
-        del bid, name, n_dims # unused
-        if ".patch_embd." in new_name:
-            return gguf.GGMLQuantizationType.F16
         if ".position_embd." in new_name:
             return gguf.GGMLQuantizationType.F32
-        return False
+        return super().tensor_force_quant(name, new_name, bid, n_dims)
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid # unused

@@ -3374,10 +3376,9 @@ def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
         yield ("audio_tower.embed_positions.weight", pos_embd)
 
     def tensor_force_quant(self, name, new_name, bid, n_dims):
-        del bid, new_name, n_dims # unused
         if ".conv" in name and ".weight" in name:
             return gguf.GGMLQuantizationType.F16
-        return False
+        return super().tensor_force_quant(name, new_name, bid, n_dims)
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         if name.startswith("thinker."):

@@ -3423,12 +3424,9 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_vision_projector_scale_factor(int(1.0 / downsample_ratio))
 
     def tensor_force_quant(self, name, new_name, bid, n_dims):
-        del bid, name, n_dims # unused
-        if ".patch_embd." in new_name:
-            return gguf.GGMLQuantizationType.F16
         if ".position_embd." in new_name:
             return gguf.GGMLQuantizationType.F32
-        return False
+        return super().tensor_force_quant(name, new_name, bid, n_dims)
 
     def _mapping_interns1_name(self, name):
         names_map = {

@@ -5062,13 +5060,12 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_vision_projector_scale_factor(proj_scale_factor)
 
     def tensor_force_quant(self, name, new_name, bid, n_dims):
-        del bid, new_name, n_dims # unused
         # related to https://github.com/ggml-org/llama.cpp/issues/13025
         if "input_projection" in name:
             return gguf.GGMLQuantizationType.F16
         if ".embeddings." in name:
             return gguf.GGMLQuantizationType.F32
-        return False
+        return super().tensor_force_quant(name, new_name, bid, n_dims)
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid # unused

@@ -7727,10 +7724,9 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_audio_attention_layernorm_eps(self.hparams.get("layer_norm_eps", 1e-5))
 
     def tensor_force_quant(self, name, new_name, bid, n_dims):
-        del bid, new_name, n_dims # unused
         if ".conv" in name and ".weight" in name:
             return gguf.GGMLQuantizationType.F16
-        return False
+        return super().tensor_force_quant(name, new_name, bid, n_dims)
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid # unused
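
Across these hunks the converter change is one refactor: the patch-embedding special case is hoisted out of the individual multimodal models into a shared tensor_force_quant on their base class (the new method added next to _find_param above). The base method forces any ".patch_embd.weight" tensor to F16 for F16 output files and F32 otherwise, and each subclass keeps only its model-specific rules and defers to super().tensor_force_quant(...) instead of returning False. A minimal sketch of the pattern, with illustrative class names rather than the converter's real hierarchy:

import gguf

class VisionModelBase:
    ftype = gguf.LlamaFileType.MOSTLY_F16  # the real converter sets self.ftype from CLI options

    def tensor_force_quant(self, name, new_name, bid, n_dims):
        del bid, name, n_dims  # unused
        if ".patch_embd.weight" in new_name:
            # shared default: keep patch embeddings at F16, or F32 for F32 output files
            return gguf.GGMLQuantizationType.F16 if self.ftype == gguf.LlamaFileType.MOSTLY_F16 else gguf.GGMLQuantizationType.F32
        return False  # no forced type; normal quantization rules apply

class SomeVisionModel(VisionModelBase):
    def tensor_force_quant(self, name, new_name, bid, n_dims):
        if ".position_embd." in new_name:
            return gguf.GGMLQuantizationType.F32  # model-specific rule stays in the subclass
        # fall back to the shared default instead of returning False
        return super().tensor_force_quant(name, new_name, bid, n_dims)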

ggml/src/ggml-vulkan/ggml-vulkan.cpp

Lines changed: 34 additions & 13 deletions
@@ -345,6 +345,9 @@ enum vk_conv_shapes {
     CONV_SHAPE_COUNT,
 };
 
+static constexpr uint32_t num_argsort_pipelines = 11;
+static constexpr uint32_t max_argsort_cols = 1 << (num_argsort_pipelines-1);
+
 struct vk_device_struct {
     std::recursive_mutex mutex;
 

@@ -461,6 +464,7 @@ struct vk_device_struct {
     vk_pipeline pipeline_upscale_nearest_f32, pipeline_upscale_bilinear_f32, pipeline_upscale_bilinear_ac_f32;
     vk_pipeline pipeline_scale_f32;
     vk_pipeline pipeline_sqr_f32;
+    vk_pipeline pipeline_sqrt_f32;
     vk_pipeline pipeline_sin_f32;
     vk_pipeline pipeline_cos_f32;
     vk_pipeline pipeline_clamp_f32;

@@ -505,7 +509,7 @@ struct vk_device_struct {
     vk_pipeline pipeline_rope_neox_f32, pipeline_rope_neox_f16;
     vk_pipeline pipeline_rope_multi_f32, pipeline_rope_multi_f16;
     vk_pipeline pipeline_rope_vision_f32, pipeline_rope_vision_f16;
-    vk_pipeline pipeline_argsort_f32;
+    vk_pipeline pipeline_argsort_f32[num_argsort_pipelines];
    vk_pipeline pipeline_sum_rows_f32;
    vk_pipeline pipeline_argmax_f32;
    vk_pipeline pipeline_count_equal_i32;

@@ -870,7 +874,6 @@ struct vk_op_soft_max_push_constants {
 
 struct vk_op_argsort_push_constants {
     uint32_t ncols;
-    uint32_t ncols_pad;
     int32_t order;
 };
 

@@ -3029,6 +3032,7 @@ static void ggml_vk_load_shaders(vk_device& device) {
     ggml_vk_create_pipeline(device, device->pipeline_scale_f32, "scale_f32", scale_f32_len, scale_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
 
     ggml_vk_create_pipeline(device, device->pipeline_sqr_f32, "sqr_f32", sqr_f32_len, sqr_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_sqrt_f32, "sqrt_f32", sqrt_f32_len, sqrt_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_sin_f32, "sin_f32", sin_f32_len, sin_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_cos_f32, "cos_f32", cos_f32_len, cos_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
 

@@ -3099,7 +3103,9 @@ static void ggml_vk_load_shaders(vk_device& device) {
         ggml_vk_create_pipeline(device, device->pipeline_rope_vision_f16, "rope_vision_f16", rope_vision_f16_len, rope_vision_f16_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
     }
 
-    ggml_vk_create_pipeline(device, device->pipeline_argsort_f32, "argsort_f32", argsort_f32_len, argsort_f32_data, "main", 2, sizeof(vk_op_argsort_push_constants), {1024, 1, 1}, {}, 1);
+    for (uint32_t i = 0; i < num_argsort_pipelines; ++i) {
+        ggml_vk_create_pipeline(device, device->pipeline_argsort_f32[i], "argsort_f32_"+std::to_string(i), argsort_f32_len, argsort_f32_data, "main", 2, sizeof(vk_op_argsort_push_constants), {1u<<i, 1, 1}, {1u<<i, i}, 1, true);
+    }
 
     ggml_vk_create_pipeline(device, device->pipeline_argmax_f32, "argmax_f32", argmax_f32_len, argmax_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
 

@@ -6977,6 +6983,11 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
             return ctx->device->pipeline_sqr_f32;
         }
         return nullptr;
+    case GGML_OP_SQRT:
+        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+            return ctx->device->pipeline_sqrt_f32;
+        }
+        return nullptr;
     case GGML_OP_SIN:
         if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
             return ctx->device->pipeline_sin_f32;

@@ -7160,7 +7171,8 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
         }
     case GGML_OP_ARGSORT:
         if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_I32) {
-            return ctx->device->pipeline_argsort_f32;
+            uint32_t idx = (uint32_t)ceilf(log2f(float(dst->ne[0])));
+            return ctx->device->pipeline_argsort_f32[idx];
         }
         return nullptr;
     case GGML_OP_SUM:

@@ -7285,6 +7297,7 @@ static bool ggml_vk_op_supports_incontiguous(ggml_op op) {
     case GGML_OP_CONCAT:
     case GGML_OP_UPSCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:

@@ -7590,6 +7603,7 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
     case GGML_OP_MUL:
     case GGML_OP_SCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:

@@ -8237,6 +8251,10 @@ static void ggml_vk_sqr(ggml_backend_vk_context * ctx, vk_context& subctx, const
     ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SQR, vk_op_unary_push_constants_init(src0, dst), dryrun);
 }
 
+static void ggml_vk_sqrt(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SQRT, vk_op_unary_push_constants_init(src0, dst), dryrun);
+}
+
 static void ggml_vk_sin(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
     ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SIN, vk_op_unary_push_constants_init(src0, dst), dryrun);
 }

@@ -8485,16 +8503,8 @@ static void ggml_vk_argsort(ggml_backend_vk_context * ctx, vk_context& subctx, c
 
     uint32_t ncols = src0->ne[0];
 
-    uint32_t ncols_pad = 1;
-    while (ncols_pad < ncols) {
-        ncols_pad *= 2;
-    }
-
-    GGML_ASSERT(ncols_pad <= 1024);
-
     ggml_vk_op_f32<vk_op_argsort_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_ARGSORT, {
         ncols,
-        ncols_pad,
         op_params[0],
     }, dryrun);
 }

@@ -9700,6 +9710,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_UPSCALE:
     case GGML_OP_SCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:

@@ -9769,6 +9780,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_UPSCALE:
     case GGML_OP_SCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:

@@ -9870,6 +9882,10 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_SQR:
         ggml_vk_sqr(ctx, compute_ctx, src0, node, dryrun);
 
+        break;
+    case GGML_OP_SQRT:
+        ggml_vk_sqrt(ctx, compute_ctx, src0, node, dryrun);
+
         break;
     case GGML_OP_SIN:
         ggml_vk_sin(ctx, compute_ctx, src0, node, dryrun);

@@ -10121,6 +10137,7 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph *
     case GGML_OP_UPSCALE:
     case GGML_OP_SCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:

@@ -11360,13 +11377,16 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
     case GGML_OP_SILU_BACK:
     case GGML_OP_RMS_NORM_BACK:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
     case GGML_OP_LEAKY_RELU:
     case GGML_OP_OPT_STEP_ADAMW:
     case GGML_OP_OPT_STEP_SGD:
         return op->src[0]->type == GGML_TYPE_F32;
+    case GGML_OP_ARGSORT:
+        return op->ne[0] <= max_argsort_cols;
     case GGML_OP_UPSCALE:
     case GGML_OP_ACC:
     case GGML_OP_CONCAT:

@@ -11376,7 +11396,6 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
     case GGML_OP_DIAG_MASK_INF:
     case GGML_OP_SOFT_MAX:
     case GGML_OP_SOFT_MAX_BACK:
-    case GGML_OP_ARGSORT:
     case GGML_OP_SUM:
     case GGML_OP_SUM_ROWS:
     case GGML_OP_ARGMAX:

@@ -11803,6 +11822,8 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph *
         tensor_clone = ggml_scale_bias(ggml_ctx, src_clone[0], params[0], params[1]);
     } else if (tensor->op == GGML_OP_SQR) {
         tensor_clone = ggml_sqr(ggml_ctx, src_clone[0]);
+    } else if (tensor->op == GGML_OP_SQRT) {
+        tensor_clone = ggml_sqrt(ggml_ctx, src_clone[0]);
     } else if (tensor->op == GGML_OP_SIN) {
         tensor_clone = ggml_sin(ggml_ctx, src_clone[0]);
     } else if (tensor->op == GGML_OP_COS) {
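
Two independent Vulkan changes land in this file. First, GGML_OP_SQRT gains an element-wise f32 pipeline (pipeline_sqrt_f32) routed through the same unary path as SQR, SIN and COS, including the result-checking clone in ggml_vk_check_results_0. Second, argsort is reworked: instead of a single pipeline with a fixed 1024-wide workgroup and an ncols_pad push constant, the shader is built as num_argsort_pipelines = 11 variants whose width 1u<<i is baked in via specialization constants; dispatch picks variant ceil(log2(ncols)), and supports_op now declines rows wider than max_argsort_cols (1 << 10 = 1024) instead of asserting at runtime. A small sketch of the selection math, in Python for illustration (the names mirror the C++ constants):

import math

NUM_ARGSORT_PIPELINES = 11
MAX_ARGSORT_COLS = 1 << (NUM_ARGSORT_PIPELINES - 1)  # 1024

def argsort_pipeline_index(ncols: int) -> int:
    # smallest i with (1 << i) >= ncols, matching ceilf(log2f(ncols)) in the C++ code
    if not 1 <= ncols <= MAX_ARGSORT_COLS:
        raise ValueError("row too wide for the Vulkan argsort pipelines")
    return math.ceil(math.log2(ncols))

# Examples: 1 column -> pipeline 0 (workgroup width 1); 600 columns -> pipeline 10 (width 1024).
print(argsort_pipeline_index(1), argsort_pipeline_index(600))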
