
Commit 573670b

vulkan: remove broken check_results GGML_OP_SET_ROWS support
1 parent 561f16d commit 573670b

File tree

1 file changed (+5 -4 lines)


ggml/src/ggml-vulkan/ggml-vulkan.cpp

Lines changed: 5 additions & 4 deletions
@@ -11168,7 +11168,7 @@ size_t comp_nb[GGML_MAX_DIMS];
 size_t check_counter = 0;
 static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph * cgraph, int tensor_idx) {
     ggml_tensor * tensor = cgraph->nodes[tensor_idx];
-    if (tensor->op == GGML_OP_TRANSPOSE) {
+    if (tensor->op == GGML_OP_TRANSPOSE || tensor->op == GGML_OP_SET_ROWS) {
         return;
     }
 
@@ -11399,8 +11399,6 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph *
         } else {
             tensor_clone = ggml_cpy(ggml_ctx, src_clone[0], src_clone[1]);
         }
-    } else if (tensor->op == GGML_OP_SET_ROWS) {
-        tensor_clone = ggml_set_rows(ggml_ctx, src_clone[0], src_clone[1], src_clone[2]);
     } else if (tensor->op == GGML_OP_CONT) {
         tensor_clone = ggml_cont_4d(ggml_ctx, src_clone[0], tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
     } else if (tensor->op == GGML_OP_RESHAPE) {
@@ -11508,7 +11506,7 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph *
 
 static void ggml_vk_check_results_1(ggml_backend_vk_context * ctx, ggml_cgraph * cgraph, int tensor_idx) {
     ggml_tensor * tensor = cgraph->nodes[tensor_idx];
-    if (tensor->op == GGML_OP_TRANSPOSE) {
+    if (tensor->op == GGML_OP_TRANSPOSE || tensor->op == GGML_OP_SET_ROWS) {
         return;
     }
     bool fused_rms_norm_mul = false;
@@ -11568,6 +11566,9 @@ static void ggml_vk_check_results_1(ggml_backend_vk_context * ctx, ggml_cgraph *
                         } else if (tensor->type == GGML_TYPE_F16) {
                             correct = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]));
                             result = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]));
+                        } else if (tensor->type == GGML_TYPE_BF16) {
+                            correct = ggml_bf16_to_fp32(*(ggml_bf16_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]));
+                            result = ggml_bf16_to_fp32(*(ggml_bf16_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]));
                         } else if (tensor->type == GGML_TYPE_I32) {
                             correct = *(int32_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]);
                             result = *(int32_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
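
Note on the new GGML_TYPE_BF16 branch: bfloat16 keeps only the upper 16 bits of an IEEE-754 float32, so widening a bf16 value back to fp32 (what ggml_bf16_to_fp32 does) amounts to shifting the stored bits into the high half of a 32-bit word. A minimal standalone sketch of that conversion, for illustration only and not the actual ggml implementation:

#include <cstdint>
#include <cstring>
#include <cstdio>

// Illustrative bf16 -> fp32 widening: the 16 stored bits become the upper
// half of the float32 bit pattern; the low 16 mantissa bits are zero.
static float bf16_to_fp32_sketch(uint16_t bits) {
    uint32_t u = (uint32_t) bits << 16;
    float f;
    std::memcpy(&f, &u, sizeof(f));
    return f;
}

int main() {
    // 0x3F80 is the bf16 encoding of 1.0f (sign 0, exponent 127, mantissa 0).
    std::printf("%f\n", bf16_to_fp32_sketch(0x3F80)); // prints 1.000000
    return 0;
}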
