diff --git a/README.md b/README.md index 0e38ddb..85357bb 100644 --- a/README.md +++ b/README.md @@ -3,12 +3,108 @@ CUDA Stream Compaction **University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 2** -* (TODO) YOUR NAME HERE - * (TODO) [LinkedIn](), [personal website](), [twitter](), etc. -* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab) +* Zijing Peng + - [LinkedIn](https://www.linkedin.com/in/zijing-peng/) + - [personal website](https://zijingpeng.github.io/) +* Tested on: Windows 22, i7-8750H @ 2.22GHz 16GB, NVIDIA GeForce GTX 1060 + +### Summary + +In this project, I've implemented GPU stream compaction in CUDA, from scratch. The goal of stream compaction is to remove `0`s from an array of `int`s. The stream compaction includes three steps: map, scan and scatter. I've also implemented several exclusive scan algorithms, which produce a prefix sum array of a given array. + +The implementations include: + +- Scan algorithms + - CPU Scan & Stream Compaction + - Naive GPU Scan + - Work-Efficient GPU Scan + - Thrust Scan +- Stream Compaction algorithms + - CPU Stream Compaction + - Work-Efficient GPU Stream Compaction + +### Scan Performance Analysis + +![](/img/scan.png) + + + +The four scan algorithm implementations experience huge performance loss as the size of the data increases, especially when the size is over 1M. The CPU scan has good performance when the dataset is small. It runs faster than the 3 other scans when the size is smaller than 4K, but after that its performance drops rapidly. When the size increases to 16M, it is much worse than all the other GPU implementations. That is because the GPU is designed for thousands of computations in parallel. The CPU has limited threads, and there are some optimizations in the OS, so it can run pretty fast when the dataset is small. But when the dataset is super large, it experiences huge performance loss. 
+ +Compared with the naive scan, the work-efficient scan is not so efficient; it is even a little bit worse. As Part 5 discussed, more optimizations could be done to improve the performance of the work-efficient scan. + +Among the four implementations, thrust scan is undoubtedly the best. When the dataset is small, the advantage of thrust scan is not so obvious compared to the others. However, it is the only one that still has good performance when the size of the dataset is 16M. I took a look at the Nsight timeline for its execution. I found there are several `cudaDeviceSynchronize` function calls, which suggests they use shared memory. Moreover, I found that in the thrust implementation the kernel is only called once (while the up/down sweep of the work-efficient scan is called 24 times with the same data size). The thrust scan uses 40 registers per thread while my implementation only uses 16 registers per thread. Thus, I guess it takes advantage of shared memory and registers. + +### Stream Compaction Performance Analysis + +![](/img/compact.png) + +The two implementations both experience huge performance loss as the size of the data increases, especially when the size is large. As we have discussed above, the CPU implementation has good performance when the dataset is small and experiences a huge performance loss when the dataset greatly increases. Meanwhile, the GPU compaction shows great performance when the size is over 1M. + +### Output + +An output when `SIZE = 256` and `blockSize = 512`. + +``` +**************** +** SCAN TESTS ** +**************** + [ 39 42 3 23 38 47 7 10 32 49 44 21 25 ... 9 0 ] +==== cpu scan, power-of-two ==== + elapsed time: 0.0007ms (std::chrono Measured) + [ 0 39 81 84 107 145 192 199 209 241 290 334 355 ... 6130 6139 ] +==== cpu scan, non-power-of-two ==== + elapsed time: 0.0005ms (std::chrono Measured) + [ 0 39 81 84 107 145 192 199 209 241 290 334 355 ... 
6074 6110 ] + passed +==== naive scan, power-of-two ==== + elapsed time: 0.018816ms (CUDA Measured) + passed +==== naive scan, non-power-of-two ==== + elapsed time: 0.018272ms (CUDA Measured) + passed +==== work-efficient scan, power-of-two ==== + elapsed time: 0.081056ms (CUDA Measured) + passed +==== work-efficient scan, non-power-of-two ==== + elapsed time: 0.044896ms (CUDA Measured) + passed +==== thrust scan, power-of-two ==== + elapsed time: 0.054528ms (CUDA Measured) + passed +==== thrust scan, non-power-of-two ==== + elapsed time: 0.054112ms (CUDA Measured) + passed + +***************************** +** STREAM COMPACTION TESTS ** +***************************** + [ 1 2 3 1 0 3 3 0 2 3 2 3 3 ... 3 0 ] +==== cpu compact without scan, power-of-two ==== + [ 1 2 3 1 3 3 2 3 2 3 3 2 2 ... 1 3 ] + passed +==== cpu compact without scan, non-power-of-two ==== + elapsed time: 0.0006ms (std::chrono Measured) + [ 1 2 3 1 3 3 2 3 2 3 3 2 2 ... 3 1 ] + passed +==== cpu compact with scan ==== + elapsed time: 0.004ms (std::chrono Measured) + [ 1 2 3 1 3 3 2 3 2 3 3 2 2 ... 1 3 ] + passed +==== work-efficient compact, power-of-two ==== + elapsed time: 0.092768ms (CUDA Measured) + passed +==== work-efficient compact, non-power-of-two ==== + elapsed time: 0.125632ms (CUDA Measured) + passed + +``` + + + + + + -### (TODO: Your README) -Include analysis, etc. (Remember, this is public, so don't put -anything here that you don't want to share with the world.) 
diff --git a/img/compact.png b/img/compact.png new file mode 100644 index 0000000..b788d09 Binary files /dev/null and b/img/compact.png differ diff --git a/img/scan.png b/img/scan.png new file mode 100644 index 0000000..eabb20c Binary files /dev/null and b/img/scan.png differ diff --git a/stream_compaction/common.cu b/stream_compaction/common.cu index 2ed6d63..f15f11a 100644 --- a/stream_compaction/common.cu +++ b/stream_compaction/common.cu @@ -23,7 +23,13 @@ namespace StreamCompaction { * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { - // TODO + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= n) { + return; + } + if (idata[index] != 0) { + bools[index] = 1; + } } /** @@ -32,8 +38,13 @@ namespace StreamCompaction { */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { - // TODO + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= n) { + return; + } + if (bools[index] == 1) { + odata[indices[index]] = idata[index]; + } } - } } diff --git a/stream_compaction/cpu.cu b/stream_compaction/cpu.cu index 719fa11..56e3104 100644 --- a/stream_compaction/cpu.cu +++ b/stream_compaction/cpu.cu @@ -12,6 +12,13 @@ namespace StreamCompaction { return timer; } + void cpu_scan(int n, int* odata, const int* idata) { + odata[0] = 0; + for (int i = 0; i < n - 1; i++) { + odata[i + 1] = odata[i] + idata[i]; + } + } + /** * CPU scan (prefix sum). * For performance analysis, this is supposed to be a simple for loop. 
@@ -19,7 +26,7 @@ namespace StreamCompaction { */ void scan(int n, int *odata, const int *idata) { timer().startCpuTimer(); - // TODO + cpu_scan(n, odata, idata); timer().endCpuTimer(); } @@ -30,9 +37,14 @@ namespace StreamCompaction { */ int compactWithoutScan(int n, int *odata, const int *idata) { timer().startCpuTimer(); - // TODO + int o = 0; + for (int i = 0; i < n; i++) { + if (idata[i]) { + odata[o++] = idata[i]; + } + } timer().endCpuTimer(); - return -1; + return o; } /** @@ -42,9 +54,23 @@ namespace StreamCompaction { */ int compactWithScan(int n, int *odata, const int *idata) { timer().startCpuTimer(); - // TODO + int* map = new int[n]; + for (int i = 0; i < n; i++) { + map[i] = idata[i] ? 1 : 0; + } + int* sout = new int[n]; + cpu_scan(n, sout, map); + int o = 0; + for (int i = 0; i < n; i++) { + if (map[i] != 0) { + odata[sout[i]] = idata[i]; + o++; + } + } + delete[] map; + delete[] sout; timer().endCpuTimer(); - return -1; + return o; } } } diff --git a/stream_compaction/efficient.cu b/stream_compaction/efficient.cu index 2db346e..0c469ba 100644 --- a/stream_compaction/efficient.cu +++ b/stream_compaction/efficient.cu @@ -12,13 +12,63 @@ namespace StreamCompaction { return timer; } + int* dev_buffer; + int* dev_booleanBuffer; + int* dev_scanBuffer; + int* dev_idata; + int* dev_odata; + + __global__ void kernUpSweep(int N, int offset, int* data) { + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= N) { + return; + } + if (index % (offset * 2) == 0) { + data[index + offset * 2 - 1] += data[index + offset - 1]; + } + } + + __global__ void kernDownSweep(int N, int offset, int* data) { + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= N) { + return; + } + if (index % (offset * 2) == 0) { + int t = data[index + offset - 1]; + data[index + offset - 1] = data[index + offset * 2 - 1]; + data[index + offset * 2 - 1] += t; + } + } + + /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ void scan(int n, int *odata, const int *idata) { + int dmax = ilog2ceil(n); + int numObjects = powf(2, dmax); + cudaMalloc((void**)&dev_buffer, numObjects * sizeof(int)); + cudaMemcpy(dev_buffer, idata, n * sizeof(int), cudaMemcpyHostToDevice); + + const int blockSize = 512; + dim3 numBlocks((numObjects + blockSize - 1) / blockSize); + timer().startGpuTimer(); - // TODO + + for (int i = 0; i < dmax; i++) { + kernUpSweep << > > (numObjects, int(powf(2, i)), dev_buffer); + } + + cudaMemset(dev_buffer + numObjects - 1, 0, sizeof(int)); + for (int i = dmax - 1; i >= 0; i--) { + kernDownSweep << > > (numObjects, int(powf(2, i)), dev_buffer); + } + timer().endGpuTimer(); + + cudaMemcpy(odata, dev_buffer, n * sizeof(int), cudaMemcpyDeviceToHost); + + cudaFree(dev_buffer); } /** @@ -31,10 +81,50 @@ namespace StreamCompaction { * @returns The number of elements remaining after compaction. */ int compact(int n, int *odata, const int *idata) { + int dmax = ilog2ceil(n); + int numObjects = powf(2, dmax); + + cudaMalloc((void**)&dev_idata, n * sizeof(int)); + cudaMalloc((void**)&dev_odata, n * sizeof(int)); + cudaMalloc((void**)&dev_booleanBuffer, n * sizeof(int)); + cudaMalloc((void**)&dev_scanBuffer, numObjects * sizeof(int)); + + cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice); + + const int blockSize = 512; + + dim3 numBlocks((n + blockSize - 1) / blockSize); + timer().startGpuTimer(); - // TODO + + Common::kernMapToBoolean << > > (n, dev_booleanBuffer, dev_idata); + + cudaMemcpy(dev_scanBuffer, dev_booleanBuffer, n * sizeof(int), cudaMemcpyDeviceToDevice); + + for (int i = 0; i < dmax; i++) { + kernUpSweep << > > (numObjects, int(powf(2, i)), dev_scanBuffer); + } + + cudaMemset(dev_scanBuffer + numObjects - 1, 0, sizeof(int)); + for (int i = dmax - 1; i >= 0; i--) { + kernDownSweep << > > (numObjects, int(powf(2, i)), dev_scanBuffer); + } + + int size = 0; + cudaMemcpy(&size, dev_scanBuffer + n - 1, sizeof(int), cudaMemcpyDeviceToHost); + + 
Common::kernScatter << > > (n, dev_odata, dev_idata, dev_booleanBuffer, dev_scanBuffer); + timer().endGpuTimer(); - return -1; + + cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost); + + cudaFree(dev_idata); + cudaFree(dev_odata); + cudaFree(dev_booleanBuffer); + cudaFree(dev_scanBuffer); + + return idata[n - 1] ? size + 1 : size; } } } diff --git a/stream_compaction/naive.cu b/stream_compaction/naive.cu index 4308876..6256c52 100644 --- a/stream_compaction/naive.cu +++ b/stream_compaction/naive.cu @@ -11,15 +11,57 @@ namespace StreamCompaction { static PerformanceTimer timer; return timer; } - // TODO: __global__ + + int* dev_bufferA; + int* dev_bufferB; + int numObjects; + + __global__ void kernNaiveScan(int N, int* A, int* B, int temp) { + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= N) { + return; + } + if (index < temp) { + A[index] = B[index]; + return; + } + A[index] = B[index - temp] + B[index]; + } + + void initSimulation(int N, const int* B) { + numObjects = N; + cudaMalloc((void**)&dev_bufferA, N * sizeof(int)); + cudaMalloc((void**)&dev_bufferB, N * sizeof(int)); + cudaMemcpy(dev_bufferB, B, N * sizeof(int), cudaMemcpyHostToDevice); + } + + void endSimulation() { + cudaFree(dev_bufferA); + cudaFree(dev_bufferB); + } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ void scan(int n, int *odata, const int *idata) { + initSimulation(n, idata); + const int blockSize = 256; + dim3 numBoidBlocks((n + blockSize - 1) / blockSize); + int dmax = ilog2ceil(n); + timer().startGpuTimer(); - // TODO + + for (int i = 1; i <= dmax; i++) { + kernNaiveScan << > > (n, dev_bufferA, dev_bufferB, int(powf(2, i - 1))); + std::swap(dev_bufferA, dev_bufferB); + } + timer().endGpuTimer(); + + cudaMemcpy(odata + 1, dev_bufferB, (n - 1) * sizeof(int), cudaMemcpyDeviceToHost); + + odata[0] = 0; + endSimulation(); } } } diff --git a/stream_compaction/thrust.cu b/stream_compaction/thrust.cu index 1def45e..6226afa 100644 --- a/stream_compaction/thrust.cu +++ b/stream_compaction/thrust.cu @@ -18,11 +18,15 @@ namespace StreamCompaction { * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { + thrust::host_vector host_idata(idata, idata + n); + thrust::device_vector dev_idata(host_idata); + thrust::device_vector dev_odata(n); + timer().startGpuTimer(); - // TODO use `thrust::exclusive_scan` - // example: for device_vectors dv_in and dv_out: - // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); + thrust::exclusive_scan(dev_idata.begin(), dev_idata.end(), dev_odata.begin()); timer().endGpuTimer(); + + thrust::copy(dev_odata.begin(), dev_odata.end(), odata); } } }