Skip to content

Commit 991b824

Browse files
committed
fix: code format issues in concat.cpp and tests/test-backend-ops.cpp
1 parent 23813c5 commit 991b824

File tree

2 files changed

+162
-4
lines changed

2 files changed

+162
-4
lines changed

ggml/src/ggml-sycl/concat.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
static inline size_t elem_size(ggml_type t) {
1616
return ggml_type_size(t) / ggml_blck_size(t);
1717
}
18+
1819
template <typename T>
1920
static void concat_T_dim0(const T *x, const T *y, T *dst,
2021
const int ne0, const int ne00,

tests/test-backend-ops.cpp

Lines changed: 161 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2557,6 +2557,13 @@ struct test_cpy : public test_case {
25572557

25582558
return out;
25592559
}
2560+
2561+
void initialize_tensors(ggml_context * ctx) override {
2562+
for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
2563+
// test extended range of values to check if casting between f32 and i32 is consistent
2564+
init_tensor_uniform(t, -150.f, 150.f);
2565+
}
2566+
}
25602567
};
25612568

25622569
// GGML_OP_CONT
@@ -3752,6 +3759,130 @@ struct test_clamp : public test_case {
37523759
}
37533760
};
37543761

3762+
// GGML_OP_FLOOR
3763+
struct test_floor : public test_case {
3764+
const ggml_type type;
3765+
const std::array<int64_t, 4> ne;
3766+
3767+
std::string vars() override {
3768+
return VARS_TO_STR2(type, ne);
3769+
}
3770+
3771+
test_floor(ggml_type type = GGML_TYPE_F32,
3772+
std::array<int64_t, 4> ne = {10, 2, 2, 2})
3773+
: type(type), ne(ne) {}
3774+
3775+
ggml_tensor * build_graph(ggml_context * ctx) override {
3776+
ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
3777+
ggml_set_param(a);
3778+
ggml_set_name(a, "a");
3779+
3780+
ggml_tensor * out = ggml_floor(ctx, a);
3781+
ggml_set_name(out, "out");
3782+
3783+
return out;
3784+
}
3785+
3786+
void initialize_tensors(ggml_context * ctx) override {
3787+
for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
3788+
init_tensor_uniform(t, -10.0f, 10.0f);
3789+
}
3790+
}
3791+
};
3792+
3793+
// GGML_OP_CEIL
3794+
struct test_ceil : public test_case {
3795+
const ggml_type type;
3796+
const std::array<int64_t, 4> ne;
3797+
3798+
std::string vars() override {
3799+
return VARS_TO_STR2(type, ne);
3800+
}
3801+
3802+
test_ceil(ggml_type type = GGML_TYPE_F32,
3803+
std::array<int64_t, 4> ne = {10, 2, 2, 2})
3804+
: type(type), ne(ne) {}
3805+
3806+
ggml_tensor * build_graph(ggml_context * ctx) override {
3807+
ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
3808+
ggml_set_param(a);
3809+
ggml_set_name(a, "a");
3810+
3811+
ggml_tensor * out = ggml_ceil(ctx, a);
3812+
ggml_set_name(out, "out");
3813+
3814+
return out;
3815+
}
3816+
3817+
void initialize_tensors(ggml_context * ctx) override {
3818+
for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
3819+
init_tensor_uniform(t, -10.0f, 10.0f);
3820+
}
3821+
}
3822+
};
3823+
3824+
// GGML_OP_ROUND
3825+
struct test_round : public test_case {
3826+
const ggml_type type;
3827+
const std::array<int64_t, 4> ne;
3828+
3829+
std::string vars() override {
3830+
return VARS_TO_STR2(type, ne);
3831+
}
3832+
3833+
test_round(ggml_type type = GGML_TYPE_F32,
3834+
std::array<int64_t, 4> ne = {10, 2, 2, 2})
3835+
: type(type), ne(ne) {}
3836+
3837+
ggml_tensor * build_graph(ggml_context * ctx) override {
3838+
ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
3839+
ggml_set_param(a);
3840+
ggml_set_name(a, "a");
3841+
3842+
ggml_tensor * out = ggml_round(ctx, a);
3843+
ggml_set_name(out, "out");
3844+
3845+
return out;
3846+
}
3847+
3848+
void initialize_tensors(ggml_context * ctx) override {
3849+
for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
3850+
init_tensor_uniform(t, -10.0f, 10.0f);
3851+
}
3852+
}
3853+
};
3854+
3855+
// GGML_OP_TRUNC
3856+
struct test_trunc : public test_case {
3857+
const ggml_type type;
3858+
const std::array<int64_t, 4> ne;
3859+
3860+
std::string vars() override {
3861+
return VARS_TO_STR2(type, ne);
3862+
}
3863+
3864+
test_trunc(ggml_type type = GGML_TYPE_F32,
3865+
std::array<int64_t, 4> ne = {10, 2, 2, 2})
3866+
: type(type), ne(ne) {}
3867+
3868+
ggml_tensor * build_graph(ggml_context * ctx) override {
3869+
ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
3870+
ggml_set_param(a);
3871+
ggml_set_name(a, "a");
3872+
3873+
ggml_tensor * out = ggml_trunc(ctx, a);
3874+
ggml_set_name(out, "out");
3875+
3876+
return out;
3877+
}
3878+
3879+
void initialize_tensors(ggml_context * ctx) override {
3880+
for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
3881+
init_tensor_uniform(t, -10.0f, 10.0f);
3882+
}
3883+
}
3884+
};
3885+
37553886
// GGML_OP_DIAG_MASK_INF
37563887
struct test_diag_mask_inf : public test_case {
37573888
const ggml_type type;
@@ -6210,6 +6341,10 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
62106341
test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {1, 0, 2, 3})); // cpy not-contiguous
62116342
}
62126343
}
6344+
test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_I32, {256, 2, 3, 4}));
6345+
test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_I32, {256, 2, 3, 4}, {1, 0, 2, 3}));
6346+
test_cases.emplace_back(new test_cpy(GGML_TYPE_I32, GGML_TYPE_F32, {256, 2, 3, 4}));
6347+
test_cases.emplace_back(new test_cpy(GGML_TYPE_I32, GGML_TYPE_F32, {256, 2, 3, 4}, {1, 0, 2, 3}));
62136348

62146349
test_cases.emplace_back(new test_cont());
62156350
test_cases.emplace_back(new test_cont(GGML_TYPE_F32, {2, 1, 1 ,1}));
@@ -6242,6 +6377,9 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
62426377
add_test_bin_bcast(type, {10, 5, 4, 3}, {1, 2, 2, 2});
62436378
add_test_bin_bcast(type, {10, 5, 4, 3}, {2, 2, 2, 2});
62446379

6380+
// test case for k_bin_bcast_unravel in CUDA backend
6381+
add_test_bin_bcast(type, {1, 1, 65536, 1}, {256, 1, 1, 1});
6382+
62456383
// stable diffusion
62466384
add_test_bin_bcast(type, {1280, 1, 1, 1}, {1, 1, 1, 1});
62476385
add_test_bin_bcast(type, {1280, 1, 1, 1}, {1, 16, 16, 1});
@@ -6571,13 +6709,21 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
65716709
test_cases.emplace_back(new test_cos (type));
65726710
test_cases.emplace_back(new test_clamp (type));
65736711
test_cases.emplace_back(new test_leaky_relu(type));
6712+
test_cases.emplace_back(new test_floor (type));
6713+
test_cases.emplace_back(new test_ceil (type));
6714+
test_cases.emplace_back(new test_round (type));
6715+
test_cases.emplace_back(new test_trunc (type));
65746716
test_cases.emplace_back(new test_sqr (type, {7, 1, 5, 3}));
65756717
test_cases.emplace_back(new test_sqrt (type, {7, 1, 5, 3}));
65766718
test_cases.emplace_back(new test_log (type, {7, 1, 5, 3}));
65776719
test_cases.emplace_back(new test_sin (type, {7, 1, 5, 3}));
65786720
test_cases.emplace_back(new test_cos (type, {7, 1, 5, 3}));
65796721
test_cases.emplace_back(new test_clamp (type, {7, 1, 5, 3}));
65806722
test_cases.emplace_back(new test_leaky_relu(type, {7, 1, 5, 3}));
6723+
test_cases.emplace_back(new test_floor (type, {7, 1, 5, 3}));
6724+
test_cases.emplace_back(new test_ceil (type, {7, 1, 5, 3}));
6725+
test_cases.emplace_back(new test_round (type, {7, 1, 5, 3}));
6726+
test_cases.emplace_back(new test_trunc (type, {7, 1, 5, 3}));
65816727
}
65826728

65836729
test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 1, 1}, 5));
@@ -6644,6 +6790,7 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
66446790
for (int64_t ne1 : {16, 1024}) {
66456791
test_cases.emplace_back(new test_soft_max_back(GGML_TYPE_F32, {ne0, ne1, 1, 1}, scale, max_bias));
66466792
test_cases.emplace_back(new test_soft_max_back(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, scale, max_bias));
6793+
test_cases.emplace_back(new test_soft_max_back(GGML_TYPE_F32, {ne0, ne1, 2, 3}, scale, max_bias));
66476794
}
66486795
}
66496796
}
@@ -6712,7 +6859,7 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
67126859
for (int dim : { 0, 1, 2, 3, }) {
67136860
test_cases.emplace_back(new test_concat(GGML_TYPE_F32, {11, 12, 13, 14}, 7, dim, v));
67146861
test_cases.emplace_back(new test_concat(GGML_TYPE_I32, {11, 12, 13, 14}, 7, dim, v));
6715-
test_cases.emplace_back(new test_concat(GGML_TYPE_I16, {11, 12, 13, 14}, 7, dim, v));
6862+
test_cases.emplace_back(new test_concat(GGML_TYPE_I16, {11, 12, 13, 14}, 7, dim, v));
67166863
}
67176864
}
67186865

@@ -6770,8 +6917,8 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
67706917
test_cases.emplace_back(new test_pad_ext(GGML_TYPE_F32, {11, 22, 33, 44}, 1, 2, 3, 4, 5, 6, 7, 8, v));
67716918
}
67726919

6773-
for (int hsk : { 40, 64, 80, 128, 192, 256, 576 }) {
6774-
for (int hsv : { 40, 64, 80, 128, 192, 256, 512 }) {
6920+
for (int hsk : { 40, 64, 80, 96, 128, 192, 256, 576 }) {
6921+
for (int hsv : { 40, 64, 80, 96, 128, 192, 256, 512 }) {
67756922
if (hsk != 192 && hsk != 576 && hsk != hsv) continue;
67766923
if (hsk == 192 && (hsv != 128 && hsv != 192)) continue;
67776924
if (hsk == 576 && hsv != 512) continue; // DeepSeek MLA
@@ -7101,7 +7248,17 @@ static void list_all_ops() {
71017248
static void show_test_coverage() {
71027249
std::set<std::string> all_ops;
71037250
for (int i = 1; i < GGML_OP_COUNT; i++) {
7104-
all_ops.insert(ggml_op_name((enum ggml_op)i));
7251+
auto op = (enum ggml_op)i;
7252+
if (op == GGML_OP_VIEW ||
7253+
op == GGML_OP_RESHAPE ||
7254+
op == GGML_OP_PERMUTE ||
7255+
op == GGML_OP_TRANSPOSE ||
7256+
op == GGML_OP_CONT ||
7257+
op == GGML_OP_GLU ||
7258+
op == GGML_OP_UNARY) {
7259+
continue;
7260+
}
7261+
all_ops.insert(ggml_op_name(op));
71057262
}
71067263
for (int i = 0; i < GGML_UNARY_OP_COUNT; i++) {
71077264
all_ops.insert(ggml_unary_op_name((enum ggml_unary_op)i));

0 commit comments

Comments (0)