% (Removed: GitHub web-page scrape artifacts — UI text and line-number gutter
%  that preceded the actual LaTeX source and would otherwise be typeset as text.)
\chapter{CUDA SDK Sample Codes}
\label{chap:cuda-sdk-sample}
\section{Starting CUDA programming}
\label{sec:start-cuda-progr}
\begin{verbatim}
1. Copy the template project
cd <SDK_INSTALL_PATH>/projects
cp -r template <myproject>
2. Edit the filenames of the project to suit your needs
mv template.cu myproject.cu
mv template_kernel.cu myproject_kernel.cu
mv template_gold.cpp myproject_gold.cpp
3. Edit the Makefile and source files. Just search and
replace all occurrences of "template" with "myproject".
4. Build the project
make
You can build a debug version with "make dbg=1",
an emulation version with "make emu=1", and
a debug emulation with "make dbg=1 emu=1".
[Emulation is no longer supported in CUDA 3.1]
5. Run the program
../../bin/linux32/release/myproject
(It should print "Test PASSED")
6. Now modify the code to perform the computation you require.
See the CUDA Programming Guide for details of
programming in CUDA.
\end{verbatim}
The library \verb!libcutil! was created to help you perform some
common tasks. It is provided as-is by the SDK examples (libcutil is
simply for convenience -- it is not a part of CUDA and is not required
for your own CUDA programs). You are free to copy it into your own
project.
\subsection{Compile a single sample}
\label{sec:comp-single-sample}
If you download only a single sample, then to compile it, e.g.
{\it reduction} application, you need to use the local Makefile
\begin{verbatim}
cd "NVIDIA GPU Computing SDK/C/src/reduction"
make
\end{verbatim}
If we use PGI C++ compiler {\bf pgcpp}
\begin{verbatim}
pgcpp simpleCUBLAS.c -I/opt/cuda/include -D__GNUC__ -L/opt/cuda/lib64 -lcublas
setenv LD_LIBRARY_PATH /opt/cuda/lib64
a.out
\end{verbatim}
or
\begin{verbatim}
pgcpp simpleCUBLAS.c -I/opt/cuda/include
-I/opt/cuda-3.0-beta/sdk/C/common/inc -D__GNUC__
obj/x86_64/release/MonteCarlo*.o
-L/opt/cuda/lib64 -lcudart
-L/opt/cuda-3.0-beta/sdk/C/common/lib/linux
-L/opt/cuda-2.3/lib64/
-L../../lib
-L../../common/lib/linux
-L../../../shared/lib -lcudart
-L/opt/cuda-2.3/lib64
-L../../lib
-L../../common/lib/linux
-L../../shared/lib -lcudart -lcutil_x86_64 -lshrutil_x86_64
setenv LD_LIBRARY_PATH /opt/cuda/lib64
a.out
\end{verbatim}
NOTE: We need to use the flag \verb!-D__GNUC__! since the CUDA header
files don't recognize pgcpp.
\textcolor{red}{In many cases you also need to include the file}
\verb!cutil_inline.h!
\textcolor{red}{located in the ``common'' folder}.
\subsection{libshrutil\_x86\_64}
\label{sec:libshrutil_x86_64}
Since CUDA 3.0, the SDK provides an additional shared utility library,
besides libcutil, called \verb!libshrutil_x86_64! (for 64-bit) or
\verb!libshrutil_x86! (for 32-bit).
If you compile a single project in CUDA SDK, you need to add
\begin{verbatim}
NVIDIA GPU Computing SDK/shared/lib
\end{verbatim}
to the link path
\begin{verbatim}
nvcc -I../../common/lib -L../../../shared/lib
\end{verbatim}
or the \verb!LD_LIBRARY_PATH! environment variable.
\section{Reduction}
\label{sec:reduction}
\begin{itemize}
\item CUDPP supports reductions for INT, UINT, FLOAT, CHAR, UCHAR.
\item Thrust supports reductions for all types, including DOUBLE.
\item CUBLAS has \verb!cublasIsamax()!, yet it is slower than Mark
Harris's reduction algorithm (CUDA SDK)
\begin{verbatim}
int cublasIsamax(int n, const float* x, int incx)
int cublasIdamax(int n, const double* x, int incx)
\end{verbatim}
which return the (smallest) index of the maximum-magnitude element of
the vector $x$, accessed with the pattern $x[1+i \cdot incx]$,
$i=0..n-1$. The result uses 1-based indexing for compatibility with
Fortran.
\begin{verbatim}
float  cublasSasum(int n, const float* x, int incx)
double cublasDasum(int n, const double* x, int incx)
\end{verbatim}
return the sum of the absolute values of the elements of the vector
$x$.
NOTE:
\textcolor{red}{CUBLAS 2.0 supports both single precision and
double precision.}
\item CUDA sample: \verb!reduction_kernel.cpp! by Mark Harris. Besides
reduce0, there are 6 further versions, which can be chosen via the
\verb!whichKernel! parameter (default is 6). NOTE: The code
requires the array size $n$ to be a {\it power of 2}, so you may need
code to set up the right array size first.
\begin{enumerate}
\item [reduce0] slowest, use modulo to choose the active thread
\item [reduce1] use contiguous threads, yet has many shared
memory bank conflicts
\item [reduce2] use sequential addressing - no divergence or bank
conflicts
\item [reduce3] use $n/2$ threads (where $n$ is the number of
elements) - perform the 1st level of reduction
\item [reduce4] unroll the last warp - need a minimum of
64*sizeof(T) bytes of shared memory (T is the type of data
elements).
\item [reduce5] completely unroll
\item [reduce6] add multiple elements per thread sequentially -
previous versions process a single pair of elements per thread
\end{enumerate}
NOTE: The size of the thread blocks must be a power of 2: 1, 2, 4, 8,
16, 32, 64, 128, 256, 512.
\end{itemize}
\subsection{Min/Max}
\label{sec:minmax}
\subsection{Sum}
\label{sec:sum}
\section{A sample header file}
\label{sec:header_file_gpumcml}
\url{http://gpumcml.googlecode.com/svn-history/r197/trunk/gpumcml/simple/gpumcml_kernel.h}
\begin{lstlisting}
/*****************************************************************************
*
* Header file for GPU-related data structures and kernel configurations
*
****************************************************************************/
/*
* This file is part of GPUMCML.
*
* GPUMCML is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GPUMCML is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GPUMCML. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _GPUMCML_KERNEL_H_
#define _GPUMCML_KERNEL_H_
#include "gpumcml.h"
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
/* Number of simulation steps performed by each thread in one kernel call
*/
#define NUM_STEPS 50000 //Use 5000 for faster response time
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// Make sure __CUDA_ARCH__ is always defined by the user.
#ifdef _WIN32
#define __CUDA_ARCH__ 120
#endif
#ifndef __CUDA_ARCH__
#error "__CUDA_ARCH__ undefined!"
#endif
/**
* Although this simple version of GPUMCML is not intended for high
* performance, there are still a few parameters to be configured for
* different GPUs.
*
* - NUM_BLOCKS:
* number of thread blocks in the grid to be launched
*
* - NUM_THREADS_PER_BLOCK:
* number of threads per thread block
*
* - EMULATED_ATOMIC:
* Enable this option for GPUs with Compute Capability 1.1,
* which do not support 64-bit atomicAdd to the global memory.
* In this case, we use two 32-bit atomicAdd's to emulate the
* 64-bit version.
*
* - USE_TRUE_CACHE:
* Enable this option for GPUs with Compute Capability 2.0 (Fermi),
* which have a 64KB configurable L1 cache in each SM.
* If enabled, the L1 cache is configured to have 48KB of true cache
* and 16KB of shared memory, as opposed to 16KB of true cache and
* 48KB of shared memory. Since the shared memory is not utilized
* in this simple version, you are encouraged to enable this option
* to cache more accesses to the absorption array in the global memory.
*/
/////////////////////////////////////////////
// Compute Capability 2.0
/////////////////////////////////////////////
#if __CUDA_ARCH__ == 200
#define NUM_BLOCKS 30
#define NUM_THREADS_PER_BLOCK 512
// #define EMULATED_ATOMIC
#define USE_TRUE_CACHE
/////////////////////////////////////////////
// Compute Capability 1.2 or 1.3
/////////////////////////////////////////////
#elif (__CUDA_ARCH__ == 120) || (__CUDA_ARCH__ == 130)
#define NUM_BLOCKS 30
#define NUM_THREADS_PER_BLOCK 256
#define EMULATED_ATOMIC
/////////////////////////////////////////////
// Compute Capability 1.1
/////////////////////////////////////////////
#elif (__CUDA_ARCH__ == 110)
#define NUM_BLOCKS 14 // should match the number of SMs on the GPUs
#define NUM_THREADS_PER_BLOCK 192
#define EMULATED_ATOMIC
/////////////////////////////////////////////
// Unsupported Compute Capability
/////////////////////////////////////////////
#else
#error "GPUMCML only supports compute capability 1.1 to 2.0!"
#endif
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
/**
* Derived macros
*/
#define NUM_THREADS (NUM_BLOCKS * NUM_THREADS_PER_BLOCK)
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
typedef struct __align__(16)
{
FLOAT init_photon_w; // initial photon weight
FLOAT dz; // z grid separation.[cm]
FLOAT dr; // r grid separation.[cm]
UINT32 na; // array range 0..na-1.
UINT32 nz; // array range 0..nz-1.
UINT32 nr; // array range 0..nr-1.
UINT32 num_layers; // number of layers.
} SimParamGPU;
typedef struct __align__(16)
{
FLOAT z0, z1; // z coordinates of a layer. [cm]
FLOAT n; // refractive index of a layer.
FLOAT muas; // mua + mus
FLOAT rmuas; // 1/(mua+mus)
FLOAT mua_muas; // mua/(mua+mus)
FLOAT g; // anisotropy.
FLOAT cos_crit0, cos_crit1;
} LayerStructGPU;
// The max number of layers supported (MAX_LAYERS including 2 ambient layers)
#define MAX_LAYERS 100
__constant__ SimParamGPU d_simparam;
__constant__ LayerStructGPU d_layerspecs[MAX_LAYERS];
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// Thread-private states that live across batches of kernel invocations
// Each field is an array of length NUM_THREADS.
//
// We use a struct of arrays as opposed to an array of structs to enable
// global memory coalescing.
//
typedef struct
{
// cartesian coordinates of the photon [cm]
FLOAT *photon_x;
FLOAT *photon_y;
FLOAT *photon_z;
// directional cosines of the photon
FLOAT *photon_ux;
FLOAT *photon_uy;
FLOAT *photon_uz;
FLOAT *photon_w; // photon weight
FLOAT *photon_sleft; // leftover step size [cm]
// index to layer where the photon resides
UINT32 *photon_layer;
UINT32 *is_active; // is this thread active?
} GPUThreadStates;
typedef struct
{
// cartesian coordinates of the photon [cm]
FLOAT x;
FLOAT y;
FLOAT z;
// directional cosines of the photon
FLOAT ux;
FLOAT uy;
FLOAT uz;
FLOAT w; // photon weight
FLOAT s; // step size [cm]
FLOAT sleft; // leftover step size [cm]
// index to layer where the photon resides
UINT32 layer;
// flag to indicate if photon hits a boundary
UINT32 hit;
} PhotonStructGPU;
#endif // _GPUMCML_KERNEL_H_
\end{lstlisting}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "gpucomputing"
%%% End: