diff --git a/README.md b/README.md
index 0e38ddb..0d05e1c 100644
--- a/README.md
+++ b/README.md
@@ -3,12 +3,90 @@ CUDA Stream Compaction
 **University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 2**
 
-* (TODO) YOUR NAME HERE
-  * (TODO) [LinkedIn](), [personal website](), [twitter](), etc.
-* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab)
+* Wenqing Wang
+  * [LinkedIn](https://www.linkedin.com/in/wenqingwang0910/)
+* Tested on: Windows 11, i7-11370H @ 3.30GHz, 16.0 GB, RTX 3050 Ti
 
-### (TODO: Your README)
+# Highlights
+* This project implements scan (exclusive prefix sum) and stream compaction using the following methods:
+  * CPU scan/compact (for comparison)
+  * GPU naïve parallel scan/compact
+  * GPU work-efficient scan/compact
+  * Thrust scan (for comparison)
+
+* A sample output of this project looks like this:
-Include analysis, etc. (Remember, this is public, so don't put
-anything here that you don't want to share with the world.)
+(Note: results were measured with `blockSize = 256` and `ArraySize = 2^24`.)
+```
+****************
+** SCAN TESTS **
+****************
+    [   5  27  39  37  38  10  22  28  45   5  12  35  19 ...  10   0 ]
+==== cpu scan, power-of-two ====
+   elapsed time: 28.2238ms    (std::chrono Measured)
+    [   0   5  32  71 108 146 156 178 206 251 256 268 303 ... 410870447 410870457 ]
+==== cpu scan, non-power-of-two ====
+   elapsed time: 27.9835ms    (std::chrono Measured)
+    [   0   5  32  71 108 146 156 178 206 251 256 268 303 ... 410870378 410870402 ]
+    passed
+==== naive scan, power-of-two ====
+   elapsed time: 21.7824ms    (CUDA Measured)
+    passed
+==== naive scan, non-power-of-two ====
+   elapsed time: 21.7999ms    (CUDA Measured)
+    passed
+==== work-efficient scan, power-of-two ====
+   elapsed time: 14.9641ms    (CUDA Measured)
+    passed
+==== work-efficient scan, non-power-of-two ====
+   elapsed time: 15.037ms    (CUDA Measured)
+    passed
+==== thrust scan, power-of-two ====
+   elapsed time: 1.55648ms    (CUDA Measured)
+    passed
+==== thrust scan, non-power-of-two ====
+   elapsed time: 1.56058ms    (CUDA Measured)
+    passed
+
+*****************************
+** STREAM COMPACTION TESTS **
+*****************************
+    [   3   3   3   1   0   2   2   2   3   1   2   3   1 ...   0   0 ]
+==== cpu compact without scan, power-of-two ====
+   elapsed time: 42.3925ms    (std::chrono Measured)
+    [   3   3   3   1   2   2   2   3   1   2   3   1   1 ...   1   3 ]
+    passed
+==== cpu compact without scan, non-power-of-two ====
+   elapsed time: 42.6625ms    (std::chrono Measured)
+    [   3   3   3   1   2   2   2   3   1   2   3   1   1 ...   3   1 ]
+    passed
+==== cpu compact with scan ====
+   elapsed time: 96.9729ms    (std::chrono Measured)
+    [   3   3   3   1   2   2   2   3   1   2   3   1   1 ...   1   3 ]
+    passed
+==== work-efficient compact, power-of-two ====
+   elapsed time: 18.041ms    (CUDA Measured)
+    passed
+==== work-efficient compact, non-power-of-two ====
+   elapsed time: 18.0232ms    (CUDA Measured)
+    passed
+```
+By comparing the time each method takes on different array sizes, we can see how much better the GPU performs than the CPU when processing larger data sets.
+
+# Performance Analysis
+## Scan Runtime Analysis
+
+![Scan runtime_1](https://user-images.githubusercontent.com/33616958/190931735-eaa086bf-3206-4127-bc64-4d6149c7b746.png)
+
+![Scan runtime_2](https://user-images.githubusercontent.com/33616958/190931738-2ec8f4ee-1242-4e22-af11-851b9f9846af.png)
+
+* From the diagrams above, we can see that when the array size is smaller than `2^16`, the CPU-side scan/compact actually performs better than the GPU implementations.
+This is probably because the GPU implementation involves many read/write operations on global memory, and the advantages of parallel computing are not obvious for smaller data sets. However, as the array size increases, the GPU starts to outperform the CPU, and the performance gap keeps widening. The work-efficient method, which performs fewer operations per scan, has a shorter execution time than the naive method. The Thrust method has the best performance on large data sets.
+
+## Compact Runtime Analysis
+
+![Compact runtime_1](https://user-images.githubusercontent.com/33616958/190931740-47fa15c7-d5e9-44fe-aeff-c54bc20ae95e.png)
+
+![Compact runtime_2](https://user-images.githubusercontent.com/33616958/190931745-c0073135-8358-4a6b-b8f9-a28b72da9b8d.png)
+
+* As the graphs above show, the performance of the stream compaction implementations trends similarly to that of the scan implementations as the array size increases.
diff --git a/src/main.cpp b/src/main.cpp
index 896ac2b..90fcea1 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -13,7 +13,7 @@
 #include
 #include "testing_helpers.hpp"
 
-const int SIZE = 1 << 8; // feel free to change the size of array
+const int SIZE = 1 << 24; // feel free to change the size of array
 const int NPOT = SIZE - 3; // Non-Power-Of-Two
 int *a = new int[SIZE];
 int *b = new int[SIZE];
diff --git a/stream_compaction/common.cu b/stream_compaction/common.cu
index 2ed6d63..4da773a 100644
--- a/stream_compaction/common.cu
+++ b/stream_compaction/common.cu
@@ -24,6 +24,17 @@ namespace StreamCompaction {
          */
        __global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
            // TODO
+           int index = threadIdx.x + (blockIdx.x * blockDim.x);
+           if (index >= n) {
+               return;
+           }
+
+           if (idata[index] == 0) {
+               bools[index] = 0;
+           }
+           else {
+               bools[index] = 1;
+           }
        }
 
        /**
@@ -33,6 +44,15 @@ namespace StreamCompaction {
        __global__ void kernScatter(int n, int *odata,
                const int *idata, const int *bools, const int *indices) {
            // TODO
+           int index = threadIdx.x + (blockIdx.x * blockDim.x);
+           if (index >= n) {
+               return;
+           }
+
+           if (bools[index] != 0) {
+               odata[indices[index]] = idata[index];
+           }
        }
 
    }
diff --git a/stream_compaction/cpu.cu b/stream_compaction/cpu.cu
index 719fa11..6ffd07d 100644
--- a/stream_compaction/cpu.cu
+++ b/stream_compaction/cpu.cu
@@ -20,6 +20,10 @@ namespace StreamCompaction {
        void scan(int n, int *odata, const int *idata) {
            timer().startCpuTimer();
            // TODO
+           odata[0] = 0;
+           for (int k = 1; k < n; k++) {
+               odata[k] = odata[k - 1] + idata[k - 1];
+           }
            timer().endCpuTimer();
        }
 
@@ -31,8 +35,14 @@ namespace StreamCompaction {
        int compactWithoutScan(int n, int *odata, const int *idata) {
            timer().startCpuTimer();
            // TODO
+           int num = 0;
+           for (int i = 0; i < n; i++) {
+               if (idata[i] != 0) {
+                   odata[num++] = idata[i];
+               }
+           }
            timer().endCpuTimer();
-           return -1;
+           return num;
        }
 
        /**
@@ -43,8 +53,40 @@ namespace StreamCompaction {
        int compactWithScan(int n, int *odata, const int *idata) {
            timer().startCpuTimer();
            // TODO
+
+           // map: 1 for every element to keep, 0 for every element to discard
+           int* omap = new int[n];
+           for (int i = 0; i < n; i++) {
+               if (idata[i] == 0) {
+                   omap[i] = 0;
+               }
+               else {
+                   omap[i] = 1;
+               }
+           }
+
+           // exclusive scan of the map gives each kept element its output index
+           int* oscan = new int[n];
+           int num = 0;
+           oscan[0] = 0;
+           for (int i = 1; i < n; i++) {
+               oscan[i] = oscan[i - 1] + omap[i - 1];
+           }
+
+           // scatter
+           for (int i = 0; i < n; i++) {
+               if (omap[i] != 0) {
+                   odata[oscan[i]] = idata[i];
+                   ++num;
+               }
+           }
+
            timer().endCpuTimer();
-           return -1;
+
+           delete[] omap;
+           delete[] oscan;
+
+           return num;
        }
    }
 }
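The CPU `compactWithScan` above follows the standard map → exclusive-scan → scatter pattern that the GPU path parallelizes. For reference, here is a minimal, self-contained host-side sketch of that same pattern (plain C++; `compactWithScanSketch` is a hypothetical name used only for illustration, not part of the project's API):

```
#include <cstdio>
#include <vector>

// Map -> exclusive scan -> scatter on the host, mirroring compactWithScan above.
int compactWithScanSketch(const std::vector<int>& in, std::vector<int>& out) {
    int n = static_cast<int>(in.size());
    if (n == 0) { out.clear(); return 0; }
    std::vector<int> flags(n), indices(n);
    for (int i = 0; i < n; ++i) flags[i] = (in[i] != 0) ? 1 : 0;        // map
    indices[0] = 0;                                                     // exclusive scan
    for (int i = 1; i < n; ++i) indices[i] = indices[i - 1] + flags[i - 1];
    int count = indices[n - 1] + flags[n - 1];                          // compacted length
    out.assign(count, 0);
    for (int i = 0; i < n; ++i)                                         // scatter
        if (flags[i]) out[indices[i]] = in[i];
    return count;
}

int main() {
    std::vector<int> in = {3, 0, 1, 0, 2, 0, 4}, out;
    int count = compactWithScanSketch(in, out);
    std::printf("kept %d elements:", count);
    for (int v : out) std::printf(" %d", v);   // prints: 3 1 2 4
    std::printf("\n");
    return 0;
}
```

Note that the last scan value plus the last flag already gives the compacted length, which is also how a GPU implementation could obtain its count without copying the whole flag array back to the host.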
diff --git a/stream_compaction/efficient.cu b/stream_compaction/efficient.cu
index 2db346e..308053f 100644
--- a/stream_compaction/efficient.cu
+++ b/stream_compaction/efficient.cu
@@ -3,6 +3,8 @@
 #include "common.h"
 #include "efficient.h"
 
+#define blockSize 256
+
 namespace StreamCompaction {
    namespace Efficient {
        using StreamCompaction::Common::PerformanceTimer;
@@ -11,14 +13,67 @@ namespace StreamCompaction {
            static PerformanceTimer timer;
            return timer;
        }
+
+        __global__ void kernUpSweep(int n, int* data, int offset)
+        {
+            int index = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (index >= n) {
+                return;
+            }
+
+            // one add per node of the balanced binary tree at this level
+            if (index % (2 * offset) == 0) {
+                int desIdx = index + (2 * offset) - 1;
+                int srcIdx = index + offset - 1;
+
+                data[desIdx] += data[srcIdx];
+            }
+        }
+
+        __global__ void kernDownSweep(int n, int* data, int offset)
+        {
+            int index = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (index >= n) {
+                return;
+            }
+            // traverse back down the tree: swap into the left child, add into the right child
+            if (index % (2 * offset) == 0) {
+                int t = data[index + offset - 1];
+                data[index + offset - 1] = data[index + offset * 2 - 1];
+                data[index + offset * 2 - 1] += t;
+            }
+        }
 
        /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
-        void scan(int n, int *odata, const int *idata) {
+        void scan(int n, int *odata, const int *idata)
+        {
+            int maxDepth = ilog2ceil(n);
+            int maxSize = 1 << maxDepth;   // pad the array up to the next power of two
+            dim3 fullBlocksPerGrid((maxSize + blockSize - 1) / blockSize);
+
+            int *dev_data;
+            cudaMalloc((void**)&dev_data, maxSize * sizeof(int));
+            cudaMemset(dev_data, 0, maxSize * sizeof(int));   // zero the padded tail
+            cudaMemcpy(dev_data, idata, n * sizeof(int), cudaMemcpyHostToDevice);
+
            timer().startGpuTimer();
            // TODO
+            // UpSweep
+            for (int d = 0; d < maxDepth; d++) {
+                kernUpSweep << < fullBlocksPerGrid, blockSize >> > (maxSize, dev_data, 1 << d);
+            }
+
+            // set the root to the identity before the down sweep
+            cudaMemset(dev_data + maxSize - 1, 0, sizeof(int));
+
+            // DownSweep
+            for (int d = maxDepth - 1; d >= 0; d--) {
+                kernDownSweep << < fullBlocksPerGrid, blockSize >> > (maxSize, dev_data, 1 << d);
+            }
+
            timer().endGpuTimer();
+
+            cudaMemcpy(odata, dev_data, n * sizeof(int), cudaMemcpyDeviceToHost);
+
+            // free CUDA memory
+            cudaFree(dev_data);
        }
 
        /**
@@ -30,11 +85,68 @@ namespace StreamCompaction {
         * @param idata The array of elements to compact.
         * @returns The number of elements remaining after compaction.
         */
-        int compact(int n, int *odata, const int *idata) {
+        int compact(int n, int *odata, const int *idata)
+        {
+            int *dev_idata, *dev_odata, *dev_bool, *dev_idx;
+
+            int maxDepth = ilog2ceil(n);
+            int maxSize = 1 << maxDepth;   // pad the array up to the next power of two
+
+            dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
+            dim3 maxBlocksPerGrid((maxSize + blockSize - 1) / blockSize);
+
+            cudaMalloc((void**)&dev_idata, maxSize * sizeof(int));
+            cudaMalloc((void**)&dev_odata, maxSize * sizeof(int));
+            cudaMalloc((void**)&dev_bool, maxSize * sizeof(int));
+            cudaMalloc((void**)&dev_idx, maxSize * sizeof(int));
+
+            // zero dev_bool so the padded tail is never counted or scattered
+            cudaMemset(dev_bool, 0, maxSize * sizeof(int));
+            cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
+
            timer().startGpuTimer();
            // TODO
+            // map each element to a 0/1 keep flag
+            Common::kernMapToBoolean << < fullBlocksPerGrid, blockSize >> > (n, dev_bool, dev_idata);
+            cudaMemcpy(dev_idx, dev_bool, maxSize * sizeof(int), cudaMemcpyDeviceToDevice);
+
+            // Scan
+            // UpSweep
+            for (int d = 0; d < maxDepth; d++) {
+                kernUpSweep << < maxBlocksPerGrid, blockSize >> > (maxSize, dev_idx, 1 << d);
+            }
+
+            cudaMemset(dev_idx + maxSize - 1, 0, sizeof(int));
+
+            // DownSweep
+            for (int d = maxDepth - 1; d >= 0; d--) {
+                kernDownSweep << < maxBlocksPerGrid, blockSize >> > (maxSize, dev_idx, 1 << d);
+            }
+
+            // Scatter
+            Common::kernScatter << < fullBlocksPerGrid, blockSize >> > (n, dev_odata, dev_idata, dev_bool, dev_idx);
+
            timer().endGpuTimer();
-            return -1;
+
+            cudaMemcpy(odata, dev_odata, sizeof(int) * n, cudaMemcpyDeviceToHost);
+
+            // count the non-zero elements
+            int* arr = new int[maxSize];
+            cudaMemcpy(arr, dev_bool, sizeof(int) * maxSize, cudaMemcpyDeviceToHost);
+
+            int count = 0;
+            for (int i = 0; i < maxSize; i++) {
+                if (arr[i] == 1) {
+                    count++;
+                }
+            }
+            delete[] arr;
+
+            // free CUDA memory
+            cudaFree(dev_idata);
+            cudaFree(dev_odata);
+            cudaFree(dev_bool);
+            cudaFree(dev_idx);
+
+            return count;
        }
    }
 }
diff --git a/stream_compaction/naive.cu b/stream_compaction/naive.cu
index 4308876..787fa8c 100644
--- a/stream_compaction/naive.cu
+++ b/stream_compaction/naive.cu
@@ -12,14 +12,56 @@ namespace StreamCompaction {
            return timer;
        }
        // TODO: __global__
+        __global__ void kernNaiveScan(int n, int* odata, int* idata, int stride)
+        {
+            int index = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (index >= n) {
+                return;
+            }
+            // each pass adds in the element `stride` positions to the left
+            if (index >= stride) {
+                odata[index] = idata[index - stride] + idata[index];
+            }
+            else {
+                odata[index] = idata[index];
+            }
+        }
 
        /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        void scan(int n, int *odata, const int *idata) {
+            int blockSize = 128;
+
+            int* dev_idata;
+            int* dev_odata;
+
+            dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
+
+            cudaMalloc((void**)&dev_idata, n * sizeof(int));
+            cudaMalloc((void**)&dev_odata, n * sizeof(int));
+            cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
+
            timer().startGpuTimer();
            // TODO
+            for (int d = 1; d <= ilog2ceil(n); d++) {
+                kernNaiveScan << < fullBlocksPerGrid, blockSize >> > (n, dev_odata, dev_idata, 1 << (d - 1));
+
+                // ping-pong buffers: the latest result is always in dev_idata
+                int* tmp = dev_idata;
+                dev_idata = dev_odata;
+                dev_odata = tmp;
+            }
            timer().endGpuTimer();
+
+            // convert the inclusive scan to an exclusive scan:
+            // copy the result shifted right by one element and set the identity as the first element
+            cudaMemcpy(odata + 1, dev_idata, (n - 1) * sizeof(int), cudaMemcpyDeviceToHost);
+            odata[0] = 0;
+
+            // free CUDA memory
+            cudaFree(dev_odata);
+            cudaFree(dev_idata);
        }
    }
 }
diff --git a/stream_compaction/thrust.cu b/stream_compaction/thrust.cu
index 1def45e..698465a 100644
--- a/stream_compaction/thrust.cu
+++ b/stream_compaction/thrust.cu
@@ -18,11 +18,16 @@ namespace StreamCompaction {
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        void scan(int n, int *odata, const int *idata) {
+            thrust::device_vector<int> dv_in(idata, idata + n);
+            thrust::device_vector<int> dv_out(n);
            timer().startGpuTimer();
            // TODO use `thrust::exclusive_scan`
            // example: for device_vectors dv_in and dv_out:
            // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
+            thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
+
            timer().endGpuTimer();
+
+            thrust::copy(dv_out.begin(), dv_out.end(), odata);
        }
    }
 }
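The work-efficient scan in `efficient.cu` above parallelizes the up-sweep/down-sweep (Blelloch) exclusive scan, launching one kernel per tree level with strides 1, 2, 4, and so on. A minimal sequential sketch of those two phases (plain C++; it assumes the length is already padded to a power of two, as the GPU code does with `maxSize`, and is an illustration rather than the project's implementation):

```
#include <cstdio>
#include <vector>

// Up-sweep / down-sweep (Blelloch) exclusive scan on the host, for a power-of-two length.
void blellochScanSketch(std::vector<int>& data) {
    if (data.empty()) return;
    int n = static_cast<int>(data.size());
    // up-sweep: build partial sums toward the root at data[n - 1]
    for (int offset = 1; offset < n; offset *= 2)
        for (int i = 0; i < n; i += 2 * offset)
            data[i + 2 * offset - 1] += data[i + offset - 1];
    // clear the root, then down-sweep: push prefix sums back down the tree
    data[n - 1] = 0;
    for (int offset = n / 2; offset >= 1; offset /= 2)
        for (int i = 0; i < n; i += 2 * offset) {
            int t = data[i + offset - 1];
            data[i + offset - 1] = data[i + 2 * offset - 1];
            data[i + 2 * offset - 1] += t;
        }
}

int main() {
    std::vector<int> data = {3, 1, 7, 0, 4, 1, 6, 3};
    blellochScanSketch(data);
    for (int v : data) std::printf("%d ", v);   // prints: 0 3 4 11 11 15 16 22
    std::printf("\n");
    return 0;
}
```

Each inner loop body here corresponds to the work of one active thread in `kernUpSweep`/`kernDownSweep`, which is why the GPU version needs only `ilog2ceil(n)` kernel launches per phase.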