Skip to content

Commit 8e88394

Browse files
committed
fix: code format issues in concat.cpp and tests/test-backend-ops.cpp
1 parent: c1632f0 · commit: 8e88394

File tree

2 files changed

+30
-4
lines changed

2 files changed

+30
-4
lines changed

ggml/src/ggml-sycl/concat.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
static inline size_t elem_size(ggml_type t) {
1616
return ggml_type_size(t) / ggml_blck_size(t);
1717
}
18+
1819
template <typename T>
1920
static void concat_T_dim0(const T *x, const T *y, T *dst,
2021
const int ne0, const int ne00,

tests/test-backend-ops.cpp

Lines changed: 29 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2557,6 +2557,13 @@ struct test_cpy : public test_case {
25572557

25582558
return out;
25592559
}
2560+
2561+
void initialize_tensors(ggml_context * ctx) override {
2562+
for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
2563+
// test extended range of values to check if casting between f32 and i32 is consistent
2564+
init_tensor_uniform(t, -150.f, 150.f);
2565+
}
2566+
}
25602567
};
25612568

25622569
// GGML_OP_CONT
@@ -6334,6 +6341,10 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
63346341
test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {1, 0, 2, 3})); // cpy not-contiguous
63356342
}
63366343
}
6344+
test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_I32, {256, 2, 3, 4}));
6345+
test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_I32, {256, 2, 3, 4}, {1, 0, 2, 3}));
6346+
test_cases.emplace_back(new test_cpy(GGML_TYPE_I32, GGML_TYPE_F32, {256, 2, 3, 4}));
6347+
test_cases.emplace_back(new test_cpy(GGML_TYPE_I32, GGML_TYPE_F32, {256, 2, 3, 4}, {1, 0, 2, 3}));
63376348

63386349
test_cases.emplace_back(new test_cont());
63396350
test_cases.emplace_back(new test_cont(GGML_TYPE_F32, {2, 1, 1 ,1}));
@@ -6366,6 +6377,9 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
63666377
add_test_bin_bcast(type, {10, 5, 4, 3}, {1, 2, 2, 2});
63676378
add_test_bin_bcast(type, {10, 5, 4, 3}, {2, 2, 2, 2});
63686379

6380+
// test case for k_bin_bcast_unravel in CUDA backend
6381+
add_test_bin_bcast(type, {1, 1, 65536, 1}, {256, 1, 1, 1});
6382+
63696383
// stable diffusion
63706384
add_test_bin_bcast(type, {1280, 1, 1, 1}, {1, 1, 1, 1});
63716385
add_test_bin_bcast(type, {1280, 1, 1, 1}, {1, 16, 16, 1});
@@ -6776,6 +6790,7 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
67766790
for (int64_t ne1 : {16, 1024}) {
67776791
test_cases.emplace_back(new test_soft_max_back(GGML_TYPE_F32, {ne0, ne1, 1, 1}, scale, max_bias));
67786792
test_cases.emplace_back(new test_soft_max_back(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, scale, max_bias));
6793+
test_cases.emplace_back(new test_soft_max_back(GGML_TYPE_F32, {ne0, ne1, 2, 3}, scale, max_bias));
67796794
}
67806795
}
67816796
}
@@ -6844,7 +6859,7 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
68446859
for (int dim : { 0, 1, 2, 3, }) {
68456860
test_cases.emplace_back(new test_concat(GGML_TYPE_F32, {11, 12, 13, 14}, 7, dim, v));
68466861
test_cases.emplace_back(new test_concat(GGML_TYPE_I32, {11, 12, 13, 14}, 7, dim, v));
6847-
test_cases.emplace_back(new test_concat(GGML_TYPE_I16, {11, 12, 13, 14}, 7, dim, v));
6862+
test_cases.emplace_back(new test_concat(GGML_TYPE_I16, {11, 12, 13, 14}, 7, dim, v));
68486863
}
68496864
}
68506865

@@ -6902,8 +6917,8 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
69026917
test_cases.emplace_back(new test_pad_ext(GGML_TYPE_F32, {11, 22, 33, 44}, 1, 2, 3, 4, 5, 6, 7, 8, v));
69036918
}
69046919

6905-
for (int hsk : { 40, 64, 80, 128, 192, 256, 576 }) {
6906-
for (int hsv : { 40, 64, 80, 128, 192, 256, 512 }) {
6920+
for (int hsk : { 40, 64, 80, 96, 128, 192, 256, 576 }) {
6921+
for (int hsv : { 40, 64, 80, 96, 128, 192, 256, 512 }) {
69076922
if (hsk != 192 && hsk != 576 && hsk != hsv) continue;
69086923
if (hsk == 192 && (hsv != 128 && hsv != 192)) continue;
69096924
if (hsk == 576 && hsv != 512) continue; // DeepSeek MLA
@@ -7233,7 +7248,17 @@ static void list_all_ops() {
72337248
static void show_test_coverage() {
72347249
std::set<std::string> all_ops;
72357250
for (int i = 1; i < GGML_OP_COUNT; i++) {
7236-
all_ops.insert(ggml_op_name((enum ggml_op)i));
7251+
auto op = (enum ggml_op)i;
7252+
if (op == GGML_OP_VIEW ||
7253+
op == GGML_OP_RESHAPE ||
7254+
op == GGML_OP_PERMUTE ||
7255+
op == GGML_OP_TRANSPOSE ||
7256+
op == GGML_OP_CONT ||
7257+
op == GGML_OP_GLU ||
7258+
op == GGML_OP_UNARY) {
7259+
continue;
7260+
}
7261+
all_ops.insert(ggml_op_name(op));
72377262
}
72387263
for (int i = 0; i < GGML_UNARY_OP_COUNT; i++) {
72397264
all_ops.insert(ggml_unary_op_name((enum ggml_unary_op)i));

0 commit comments

Comments (0)