
Commit 5bb6194

Add cuda tests (#402)
* Fix pass manager errors in driver.cc
* Add polybench-cuda
* Add run lines
* Add info files
* XFAIL failing tests
* gitignore
1 parent 8f8ae72 commit 5bb6194
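The run lines follow LLVM lit conventions: each test compiles itself with cgeist and then executes the resulting binary. A minimal sketch of the directive styles involved (the RUN line is the one used by 2mm below; the XFAIL form is an assumption based on the commit message, since a failing test can also be disabled by swapping its run line for // RUN: true, as 3mm below does):

// A self-compiling, self-running lit test:
// RUN: cgeist %s %stdinclude %cudaopts -O3 -o %s.execm && %s.execm 1 10 10 10 10

// A test expected to fail can instead carry lit's XFAIL directive:
// XFAIL: *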

File tree

31 files changed, +3452 -51 lines changed


tools/cgeist/.gitignore

Lines changed: 1 addition & 0 deletions
@@ -1 +1,2 @@
 *.time *.exec1 *.out1
+*.execm
Lines changed: 169 additions & 0 deletions
@@ -0,0 +1,169 @@
// clang-format off
// RUN: cgeist %s %stdinclude %cudaopts -O3 -o %s.execm && %s.execm 1 10 10 10 10
// clang-format on
/**
 * 2mm.c: This file is part of the PolyBench/C 3.2 test suite.
 *
 *
 * Contact: Louis-Noel Pouchet <[email protected]>
 * Web address: http://polybench.sourceforge.net
 */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

// tmp = alpha * A * B; one thread per element of tmp (ni x nj).
__global__ void kernel_A_mul_B(int ni, int nj, int nk, int nl, double alpha,
                               double beta, double *tmp, double *A, double *B,
                               double *C, double *D) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  int j = blockDim.y * blockIdx.y + threadIdx.y;
  int k;
  double dot = 0.0;

  if (i < ni && j < nj) {
    for (k = 0; k < nk; k++)
      dot += alpha * A[i * nk + k] * B[k * nj + j];
    tmp[i * nj + j] = dot;
  }
}

// D = tmp * C + beta * D; one thread per element of D (ni x nl).
__global__ void kernel_D_plus_tmp_mul_C(int ni, int nj, int nk, int nl,
                                        double alpha, double beta, double *tmp,
                                        double *A, double *B, double *C,
                                        double *D) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  int l = blockDim.y * blockIdx.y + threadIdx.y;
  int j;
  double dot = 0.0;

  if (i < ni && l < nl) {
    // D[i * nl + l] *= beta;
    dot = D[i * nl + l] * beta;

    for (j = 0; j < nj; j++)
      // D[i * nl + l] += tmp[i * nj + j] * C[j * nl + l];
      dot += tmp[i * nj + j] * C[j * nl + l];
    D[i * nl + l] = dot;
  }
}

// Ceiling division: enough blocks to cover num elements, factor per block.
static unsigned num_blocks(int num, int factor) {
  return (num + factor - 1) / factor;
}

static void kernel(int ni, int nj, int nk, int nl, double alpha, double beta,
                   double *tmp, double *A, double *B, double *C, double *D) {

  unsigned threadsPerBlock = 256;
  dim3 block(threadsPerBlock / 32, 32, 1);

  {
    dim3 grid(num_blocks(ni, block.x), num_blocks(nj, block.y), 1);
    kernel_A_mul_B<<<grid, block>>>(ni, nj, nk, nl, alpha, beta, tmp, A, B, C,
                                    D);
  }

  {
    dim3 grid(num_blocks(ni, block.x), num_blocks(nl, block.y), 1);
    kernel_D_plus_tmp_mul_C<<<grid, block>>>(ni, nj, nk, nl, alpha, beta, tmp,
                                             A, B, C, D);
  }
}

static void print_array(int ni, int nl, double *D) {
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nl; j++) {
      fprintf(stderr, "%0.2lf ", D[i * nl + j]);
      if ((i * nl + j) % 20 == 0)
        fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}

static void init_array(int ni, int nj, int nk, int nl, double *A, double *B,
                       double *C, double *D, double *tmp) {
  int i, j;

  // Row-major shapes: A is ni x nk, B is nk x nj, C is nl x nj,
  // D is ni x nl, tmp is ni x nj.
  for (i = 0; i < ni; i++)
    for (j = 0; j < nk; j++)
      A[i * nk + j] = ((double)i * j) / ni;
  for (i = 0; i < nk; i++)
    for (j = 0; j < nj; j++)
      B[i * nj + j] = ((double)i * (j + 1)) / nj;
  for (i = 0; i < nl; i++)
    for (j = 0; j < nj; j++)
      C[i * nj + j] = ((double)i * (j + 3)) / nl;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nl; j++)
      D[i * nl + j] = ((double)i * (j + 2)) / nk;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      tmp[i * nj + j] = 0;
}

int main(int argc, char **argv) {
  int dump_code = atoi(argv[1]);
  long ni = atoi(argv[2]);
  long nj = atoi(argv[3]);
  long nk = atoi(argv[4]);
  long nl = atoi(argv[5]);

  double alpha = 32412;
  double beta = 2123;
  double *A = (double *)malloc(ni * nk * sizeof(double));
  double *B = (double *)malloc(nk * nj * sizeof(double));
  double *C = (double *)malloc(nl * nj * sizeof(double));
  double *D = (double *)malloc(ni * nl * sizeof(double));
  double *tmp = (double *)malloc(ni * nj * sizeof(double));

  init_array(ni, nj, nk, nl, A, B, C, D, tmp);

  double *dev_A;
  double *dev_B;
  double *dev_C;
  double *dev_D;
  double *dev_tmp;
  cudaMalloc(&dev_A, ni * nk * sizeof(double));
  cudaMalloc(&dev_B, nk * nj * sizeof(double));
  cudaMalloc(&dev_C, nl * nj * sizeof(double));
  cudaMalloc(&dev_D, ni * nl * sizeof(double));
  cudaMalloc(&dev_tmp, ni * nj * sizeof(double));
  cudaMemcpy(dev_A, A, ni * nk * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_B, B, nk * nj * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_C, C, nl * nj * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_D, D, ni * nl * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_tmp, tmp, ni * nj * sizeof(double), cudaMemcpyHostToDevice);

  kernel(ni, nj, nk, nl, alpha, beta, dev_tmp, dev_A, dev_B, dev_C, dev_D);

  cudaMemcpy(D, dev_D, ni * nl * sizeof(double), cudaMemcpyDeviceToHost);
  cudaFree((void *)dev_A);
  cudaFree((void *)dev_B);
  cudaFree((void *)dev_C);
  cudaFree((void *)dev_D);
  cudaFree((void *)dev_tmp);

  if (dump_code == 1)
    print_array(ni, nl, D);

  free((void *)tmp);
  free((void *)A);
  free((void *)B);
  free((void *)C);
  free((void *)D);

  return 0;
}
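Taken together, the two kernels compute the PolyBench 2mm operation D := alpha*A*B*C + beta*D, with alpha folded into the first product (tmp = alpha*A*B, then D = tmp*C + beta*D). A minimal serial sketch of the same arithmetic, useful for spot-checking the device result at small sizes; check_2mm is an illustrative helper name, not part of the test suite:

// Serial reference: tmp = alpha*A*B, then D = tmp*C + beta*D.
// Array shapes match the mallocs in main() above (row-major).
static void check_2mm(int ni, int nj, int nk, int nl, double alpha, double beta,
                      const double *A, const double *B, const double *C,
                      double *D, double *tmp) {
  for (int i = 0; i < ni; i++)
    for (int j = 0; j < nj; j++) {
      double dot = 0.0;
      for (int k = 0; k < nk; k++)
        dot += alpha * A[i * nk + k] * B[k * nj + j];
      tmp[i * nj + j] = dot;
    }
  for (int i = 0; i < ni; i++)
    for (int l = 0; l < nl; l++) {
      double dot = D[i * nl + l] * beta;
      for (int j = 0; j < nj; j++)
        dot += tmp[i * nj + j] * C[j * nl + l];
      D[i * nl + l] = dot;
    }
}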
Lines changed: 161 additions & 0 deletions
@@ -0,0 +1,161 @@
// clang-format off
// COM: cgeist %s %stdinclude %cudaopts -O3 -o %s.execm && %s.execm
// RUN: true
// clang-format on
/**
 * 3mm.c: This file is part of the PolyBench/C 3.2 test suite.
 *
 *
 * Contact: Louis-Noel Pouchet <[email protected]>
 * Web address: http://polybench.sourceforge.net
 */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

// C = A * B; one thread per element of C (ni x nj). Reused for all three
// products in kernel() below.
__global__ void kernel_A_mul_B(int ni, int nj, int nk, double *C, double *A,
                               double *B) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  int j = blockDim.y * blockIdx.y + threadIdx.y;
  double dot = 0.0;

  if (i < ni && j < nj) {
    for (int k = 0; k < nk; k++)
      // C[i * nj + j] += A[i * nk + k] * B[k * nj + j];
      dot += A[i * nk + k] * B[k * nj + j];
    C[i * nj + j] = dot;
  }
}

// Ceiling division: enough blocks to cover num elements, factor per block.
static unsigned num_blocks(int num, int factor) {
  return (num + factor - 1) / factor;
}

static void init_array(int ni, int nj, int nk, int nl, int nm, double *A,
                       double *B, double *C, double *D, double *E, double *F,
                       double *G) {
  int i, j;

  // Row-major shapes: A is ni x nk, B is nk x nj, C is nj x nm, D is nm x nl,
  // E is ni x nj, F is nj x nl, G is ni x nl.
  for (i = 0; i < ni; i++)
    for (j = 0; j < nk; j++)
      A[i * nk + j] = ((double)i * j) / ni;
  for (i = 0; i < nk; i++)
    for (j = 0; j < nj; j++)
      B[i * nj + j] = ((double)i * (j + 1)) / nj;
  for (i = 0; i < nj; i++)
    for (j = 0; j < nm; j++)
      C[i * nm + j] = ((double)i * (j + 3)) / nl;
  for (i = 0; i < nm; i++)
    for (j = 0; j < nl; j++)
      D[i * nl + j] = ((double)i * (j + 2)) / nk;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      E[i * nj + j] = 0;
  for (i = 0; i < nj; i++)
    for (j = 0; j < nl; j++)
      F[i * nl + j] = 0;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nl; j++)
      G[i * nl + j] = 0;
}

static void print_array(int ni, int nl, double *G) {
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nl; j++) {
      fprintf(stderr, "%0.2lf ", G[i * nl + j]);
      if ((i * nl + j) % 20 == 0)
        fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}

// G = (A * B) * (C * D), computed as E = A*B, F = C*D, G = E*F.
static void kernel(int ni, int nj, int nk, int nl, int nm, double *E, double *A,
                   double *B, double *F, double *C, double *D, double *G) {
  unsigned threadsPerBlock = 256;
  dim3 block(threadsPerBlock / 32, 32, 1);

  {
    dim3 grid(num_blocks(ni, block.x), num_blocks(nj, block.y), 1);
    kernel_A_mul_B<<<grid, block>>>(ni, nj, nk, E, A, B);
  }

  {
    dim3 grid(num_blocks(nj, block.x), num_blocks(nl, block.y), 1);
    kernel_A_mul_B<<<grid, block>>>(nj, nl, nm, F, C, D);
  }

  {
    dim3 grid(num_blocks(ni, block.x), num_blocks(nl, block.y), 1);
    kernel_A_mul_B<<<grid, block>>>(ni, nl, nj, G, E, F);
  }
}

int main(int argc, char **argv) {

  int dump_code = atoi(argv[1]);
  int ni = atoi(argv[2]);
  int nj = atoi(argv[3]);
  int nk = atoi(argv[4]);
  int nl = atoi(argv[5]);
  int nm = atoi(argv[6]);

  double *A = (double *)malloc(ni * nk * sizeof(double));
  double *B = (double *)malloc(nk * nj * sizeof(double));
  double *C = (double *)malloc(nj * nm * sizeof(double));
  double *D = (double *)malloc(nm * nl * sizeof(double));
  double *E = (double *)malloc(ni * nj * sizeof(double));
  double *F = (double *)malloc(nj * nl * sizeof(double));
  double *G = (double *)malloc(ni * nl * sizeof(double));

  init_array(ni, nj, nk, nl, nm, A, B, C, D, E, F, G);

  double *dev_A;
  double *dev_B;
  double *dev_C;
  double *dev_D;
  double *dev_E;
  double *dev_F;
  double *dev_G;
  cudaMalloc(&dev_A, ni * nk * sizeof(double));
  cudaMalloc(&dev_B, nk * nj * sizeof(double));
  cudaMalloc(&dev_C, nj * nm * sizeof(double));
  cudaMalloc(&dev_D, nm * nl * sizeof(double));
  cudaMalloc(&dev_E, ni * nj * sizeof(double));
  cudaMalloc(&dev_F, nj * nl * sizeof(double));
  cudaMalloc(&dev_G, ni * nl * sizeof(double));
  cudaMemcpy(dev_A, A, ni * nk * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_B, B, nk * nj * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_C, C, nj * nm * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_D, D, nm * nl * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_E, E, ni * nj * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_F, F, nj * nl * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_G, G, ni * nl * sizeof(double), cudaMemcpyHostToDevice);

  kernel(ni, nj, nk, nl, nm, dev_E, dev_A, dev_B, dev_F, dev_C, dev_D, dev_G);

  cudaMemcpy(G, dev_G, ni * nl * sizeof(double), cudaMemcpyDeviceToHost);

  cudaFree((void *)dev_A);
  cudaFree((void *)dev_B);
  cudaFree((void *)dev_C);
  cudaFree((void *)dev_D);
  cudaFree((void *)dev_E);
  cudaFree((void *)dev_F);
  cudaFree((void *)dev_G);

  if (dump_code == 1)
    print_array(ni, nl, G);

  free((void *)E);
  free((void *)A);
  free((void *)B);
  free((void *)F);
  free((void *)C);
  free((void *)D);
  free((void *)G);

  return 0;
}
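Note that 3mm reuses the single kernel_A_mul_B kernel for all three products, re-binding the dimension arguments at each launch. The grid sizing is plain ceiling division; a standalone sketch under assumed sizes (the 8x32 block matches threadsPerBlock / 32 above, and the 100-element dimensions are only an example):

#include <stdio.h>

// Ceiling division, as in num_blocks above: one extra block covers any remainder.
static unsigned num_blocks(int num, int factor) { return (num + factor - 1) / factor; }

int main(void) {
  // With an 8x32 block and a 100x100 output matrix:
  //   grid.x = (100 + 7) / 8   = 13 blocks of 8 threads  (covers 104 rows)
  //   grid.y = (100 + 31) / 32 =  4 blocks of 32 threads (covers 128 cols)
  // The "i < ni && j < nj" guard in the kernel masks the overhanging threads.
  printf("grid = (%u, %u)\n", num_blocks(100, 8), num_blocks(100, 32));
  return 0;
}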
Lines changed: 14 additions & 0 deletions
@@ -0,0 +1,14 @@
* * * * * * * * * * * * *
* Authors of PolyBench *
* * * * * * * * * * * * *


* Louis-Noel Pouchet <[email protected]>
  Who provided packaging and harmonization of all test files,
  the PolyBench infrastructure and machinery, and several
  reference C files.

* Uday Bondhugula <[email protected]>
  Who provided many of the original reference C files, including
  Fortran to C translation.