
Commit 5aa4ae7

make CI happy
1 parent 2357922 commit 5aa4ae7

File tree

1 file changed: +17 -8 lines changed


tests/test-conv3d.cpp

Lines changed: 17 additions & 8 deletions
@@ -28,6 +28,7 @@ static void ggml_log_callback_default(ggml_log_level level, const char * text, v
     fflush(stderr);
 }
 
+
 struct test_model {
     struct ggml_tensor * a;
     struct ggml_tensor * b;
@@ -36,6 +37,14 @@ struct test_model {
     struct ggml_context * ctx;
 };
 
+struct ggml_cgraph * build_graph_0(const test_model& model, const int64_t ic, const int64_t n, const int64_t oc);
+struct ggml_cgraph * build_graph_1(const test_model& model, const int64_t ic, const int64_t n, const int64_t oc);
+typedef struct ggml_cgraph* (*build_graph_t)(const test_model& model,
+    const int64_t i0, const int64_t i1, const int64_t i2);
+
+std::vector<float> compute_graph(const test_model & model, ggml_gallocr_t allocr,
+    build_graph_t build_graph, int iters,
+    const int64_t ic, const int64_t n, const int64_t oc, double *t);
 
 
 void load_model(test_model & model, int ic, int oc, int iw, int ih, int id,
@@ -101,6 +110,8 @@ void load_model(test_model & model, int ic, int oc, int iw, int ih, int id,
             fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
         }
     }
+#else
+    GGML_UNUSED(use_gpu);
 #endif
 
 #ifdef GGML_USE_METAL
@@ -112,6 +123,8 @@ void load_model(test_model & model, int ic, int oc, int iw, int ih, int id,
             fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
         }
     }
+#else
+    GGML_UNUSED(use_gpu);
 #endif
 
     if(!model.backend) {
@@ -164,10 +177,11 @@ void load_model(test_model & model, int ic, int oc, int iw, int ih, int id,
     }
 }
 
-typedef struct ggml_cgraph* (*build_graph_t)(const test_model& model,
-    const int64_t i0, const int64_t i1, const int64_t i2);
-
 struct ggml_cgraph * build_graph_0(const test_model& model, const int64_t ic, const int64_t n, const int64_t oc) {
+
+    GGML_UNUSED(n);
+    GGML_UNUSED(oc);
+
     static size_t buf_size = ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead();
     static std::vector<uint8_t> buf(buf_size);
 
@@ -370,7 +384,6 @@ int main(void)
     // fprintf(stderr, "%s: compute buffer size: %.2f MB\n", __func__, mem_size/1024.0f/1024.0f);
 
 
-    struct ggml_cgraph * gf_res_0 = NULL;
     int iterations = 20;
 
     double run_time0;
@@ -389,12 +402,8 @@ int main(void)
     // compute the required memory
     ggml_gallocr_reserve(allocr, gf);
    size_t mem_size1 = ggml_gallocr_get_buffer_size(allocr, 0);
-    // fprintf(stderr, "%s: compute buffer size: %.2f MB\n", __func__, mem_size/1024.0f/1024.0f);
-
-    struct ggml_cgraph * gf_res_1 = NULL;
 
     double run_time1;
-    // std::vector<float> wino_data = compute_graph(model, allocr, build_graph_1, iterations, &run_time1);
     std::vector<float> conv2d_data = compute_graph(model, allocr, build_graph_1, iterations,
         std::get<0>(c), 1, std::get<1>(c), &run_time1);
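
For context, the warning fixes above follow the usual pattern for keeping CI green under -Werror: when a backend is compiled out, the new #else branch touches the otherwise-unreferenced use_gpu argument with GGML_UNUSED, and build_graph_0 does the same for the n and oc parameters it does not need. The following is a minimal, self-contained sketch of that pattern, assuming only that GGML_UNUSED(x) expands to (void)(x) as in ggml.h; the init_backend function and the HAVE_CUDA_BACKEND macro are hypothetical names used for illustration, not part of this test file.

#include <cstdio>

// Stand-in for the macro from ggml.h (assumed definition: discard the value).
#define GGML_UNUSED(x) (void)(x)

// Hypothetical example: a parameter that is only used when a backend is enabled.
static void init_backend(bool use_gpu) {
#ifdef HAVE_CUDA_BACKEND
    if (use_gpu) {
        fprintf(stderr, "initializing GPU backend\n");
    }
#else
    // Backend compiled out: reference the parameter so -Wunused-parameter
    // (fatal when CI builds with -Werror) stays silent.
    GGML_UNUSED(use_gpu);
#endif
}

int main(void) {
    init_backend(true);
    return 0;
}

The rest of the change is declarative: the build_graph_t typedef and the build_graph_0 / build_graph_1 prototypes are hoisted above load_model so compute_graph can be declared before the builder definitions, and the unused gf_res_0 / gf_res_1 locals are removed.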
