diff --git a/README.md b/README.md index 0e38ddb..c5180a8 100644 --- a/README.md +++ b/README.md @@ -3,12 +3,84 @@ CUDA Stream Compaction **University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 2** -* (TODO) YOUR NAME HERE - * (TODO) [LinkedIn](), [personal website](), [twitter](), etc. -* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab) +* Haoyu Sui + * [LinkedIn](http://linkedin.com/in/haoyu-sui-721284192) +* Tested on: Windows 10, i5-9600K @ 3.70GHz 16GB, RTX 2070 SUPER 8GB +* SM:7.5 -### (TODO: Your README) +### Features +* CPU Scan & Stream Compaction +* Naive GPU Scan Algorithm +* Work-Efficient GPU Scan & Stream Compaction +* Using Thrust's Implementation -Include analysis, etc. (Remember, this is public, so don't put -anything here that you don't want to share with the world.) +### Analysis +**Rough optimization of each implementation** + +In order to find the appropriate block size to make each method produce the best performance, I tried several different block sizes for each method: 32, 64, 128, 256, 512. The results can be seen in the table below. (Array size is 2^20) + +![](img/BlockSize.png) + +**Performance analysis** + +I chose a block size of 128, and then performed a different array size performance test for each method. It can be seen from the figure that when the array size is relatively large, the performance advantage of GPU is more obvious. Among the three different GPU methods, Thrust Scan and Efficient Scan have better performance than Naive Scan. + +![](img/ArraySize.png) + + +### Output in cmd +``` +**************** +** SCAN TESTS ** +**************** + [ 8 9 43 42 23 4 8 38 23 10 27 11 44 ... 45 0 ] +==== cpu scan, power-of-two ==== + elapsed time: 3.9358ms (std::chrono Measured) + [ 0 8 17 60 102 125 129 137 175 198 208 235 246 ... 
25659338 25659383 ] +==== cpu scan, non-power-of-two ==== + elapsed time: 1.4142ms (std::chrono Measured) + [ 0 8 17 60 102 125 129 137 175 198 208 235 246 ... 25659264 25659282 ] + passed +==== naive scan, power-of-two ==== + elapsed time: 0.485664ms (CUDA Measured) + passed +==== naive scan, non-power-of-two ==== + elapsed time: 0.485152ms (CUDA Measured) + passed +==== work-efficient scan, power-of-two ==== + elapsed time: 0.219872ms (CUDA Measured) + passed +==== work-efficient scan, non-power-of-two ==== + elapsed time: 0.19888ms (CUDA Measured) + passed +==== thrust scan, power-of-two ==== + elapsed time: 0.204576ms (CUDA Measured) + passed +==== thrust scan, non-power-of-two ==== + elapsed time: 0.214432ms (CUDA Measured) + passed + +***************************** +** STREAM COMPACTION TESTS ** +***************************** + [ 1 1 1 2 1 1 0 1 2 0 1 3 1 ... 0 0 ] +==== cpu compact without scan, power-of-two ==== + elapsed time: 2.5239ms (std::chrono Measured) + [ 1 1 1 2 1 1 1 2 1 3 1 2 3 ... 1 1 ] + passed +==== cpu compact without scan, non-power-of-two ==== + elapsed time: 2.2133ms (std::chrono Measured) + [ 1 1 1 2 1 1 1 2 1 3 1 2 3 ... 1 1 ] + passed +==== cpu compact with scan ==== + elapsed time: 8.8561ms (std::chrono Measured) + [ 1 1 1 2 1 1 1 2 1 3 1 2 3 ... 
1 1 ] + passed +==== work-efficient compact, power-of-two ==== + elapsed time: 1.19734ms (CUDA Measured) + passed +==== work-efficient compact, non-power-of-two ==== + elapsed time: 0.7928ms (CUDA Measured) + passed +``` \ No newline at end of file diff --git a/img/ArraySize.png b/img/ArraySize.png new file mode 100644 index 0000000..6884270 Binary files /dev/null and b/img/ArraySize.png differ diff --git a/img/BlockSize.png b/img/BlockSize.png new file mode 100644 index 0000000..8ee66ca Binary files /dev/null and b/img/BlockSize.png differ diff --git a/src/main.cpp b/src/main.cpp index 896ac2b..600c29f 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -13,7 +13,7 @@ #include #include "testing_helpers.hpp" -const int SIZE = 1 << 8; // feel free to change the size of array +const int SIZE = 1 << 20; // feel free to change the size of array const int NPOT = SIZE - 3; // Non-Power-Of-Two int *a = new int[SIZE]; int *b = new int[SIZE]; diff --git a/stream_compaction/common.cu b/stream_compaction/common.cu index 2ed6d63..336bfc4 100644 --- a/stream_compaction/common.cu +++ b/stream_compaction/common.cu @@ -24,6 +24,12 @@ namespace StreamCompaction { */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { // TODO + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= n) { + return; + } + + bools[index] = idata[index] != 0 ? 1 : 0; } /** @@ -33,7 +39,16 @@ namespace StreamCompaction { __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { // TODO - } + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= n) { + return; + } + if(bools[index] == 1) + { + int oIndex = indices[index]; + odata[oIndex] = idata[index]; + } + } } } diff --git a/stream_compaction/common.h b/stream_compaction/common.h index d2c1fed..c4ca0de 100644 --- a/stream_compaction/common.h +++ b/stream_compaction/common.h @@ -13,6 +13,8 @@ #define FILENAME (strrchr(__FILE__, '/') ? 
strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) +#define blockSize 128 + /** * Check for CUDA errors; print and exit if there was a problem. */ diff --git a/stream_compaction/cpu.cu b/stream_compaction/cpu.cu index 719fa11..1beaf36 100644 --- a/stream_compaction/cpu.cu +++ b/stream_compaction/cpu.cu @@ -20,6 +20,11 @@ namespace StreamCompaction { void scan(int n, int *odata, const int *idata) { timer().startCpuTimer(); // TODO + odata[0] = 0; + for (int i = 1; i < n; ++i) + { + odata[i] = idata[i - 1] + odata[i - 1]; + } timer().endCpuTimer(); } @@ -31,8 +36,17 @@ namespace StreamCompaction { int compactWithoutScan(int n, int *odata, const int *idata) { timer().startCpuTimer(); // TODO + int oIndex = 0; + for(int i = 0; i < n; ++i) + { + if(idata[i] != 0) + { + odata[oIndex] = idata[i]; + oIndex ++; + } + } timer().endCpuTimer(); - return -1; + return oIndex; } /** @@ -41,10 +55,44 @@ namespace StreamCompaction { * @returns the number of elements remaining after compaction. 
*/ int compactWithScan(int n, int *odata, const int *idata) { + int *mappedArr = new int[n]; + int *scannedArr = new int[n]; timer().startCpuTimer(); - // TODO + + // Compute temporary array containing 1 and 0 + for(int i = 0; i < n; ++i) + { + if(idata[i] != 0) + { + mappedArr[i] = 1; + } + else + { + mappedArr[i] = 0; + } + } + + // Run exclusive scan on mapped array + scannedArr[0] = 0; + for (int i = 1; i < n; ++i) + { + scannedArr[i] = mappedArr[i - 1] + scannedArr[i - 1]; + } + + // Scatter + int oCount = 0; + for(int i = 0; i < n; ++i) + { + if(mappedArr[i] != 0) + { + int index = scannedArr[i]; + odata[index] = idata[i]; + oCount ++; + } + } + timer().endCpuTimer(); - return -1; + return oCount; } } } diff --git a/stream_compaction/efficient.cu b/stream_compaction/efficient.cu index 2db346e..c209dca 100644 --- a/stream_compaction/efficient.cu +++ b/stream_compaction/efficient.cu @@ -12,13 +12,83 @@ namespace StreamCompaction { return timer; } + __global__ void kernUpSweep(int n_pot, int* data, int d) + { + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + + int temp_d = 1 << (d + 1); + int k = index * temp_d; + + if (k >= n_pot) { + return; + } + + int power1 = 1 << (d + 1); + int power2 = 1 << d; + data[k + power1 - 1] += data[k + power2 - 1]; + } + + __global__ void kernDownSweep(int n_pot, int* data, int d) + { + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + + int temp_d = 1 << (d + 1); + int k = index * temp_d; + + if (k >= n_pot) { + return; + } + + int power1 = 1 << (d + 1); + int power2 = 1 << d; + int t = data[k + power2 - 1]; + data[k + power2 - 1] = data[k + power1 - 1]; + data[k + power1 - 1] += t; + } + /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ void scan(int n, int *odata, const int *idata) { + int *dev_data; + + // Get power of two length + int logValue = ilog2ceil(n); + int n_pot = 1 << logValue; + + // CUDA memory arrangement and error checking + cudaMalloc((void**)&dev_data, n_pot * sizeof(int)); + checkCUDAError("cudaMalloc dev_data failed!"); + + cudaMemset(dev_data, 0, n_pot * sizeof(int)); + checkCUDAError("cudaMemset dev_data failed!"); + + cudaMemcpy(dev_data, idata, n * sizeof(int), cudaMemcpyHostToDevice); + checkCUDAError("cudaMemcpy from idata to dev_data failed!"); + timer().startGpuTimer(); - // TODO + + // Up-Sweep + for(int d = 0; d <= ilog2ceil(n) - 1; ++d) + { + dim3 blocksPerGrid((n_pot / pow(2, d + 1) + blockSize - 1) / blockSize); + kernUpSweep<<>>(n_pot, dev_data, d); + } + + // Down-Sweep + cudaMemset(dev_data + n_pot - 1, 0, sizeof(int)); + for(int d = ilog2ceil(n) - 1; d >=0; --d) + { + dim3 blocksPerGrid((n_pot / pow(2, d + 1) + blockSize - 1) / blockSize); + kernDownSweep<<>>(n_pot, dev_data, d); + } + timer().endGpuTimer(); + + cudaMemcpy(odata, dev_data, n * sizeof(int), cudaMemcpyDeviceToHost); + checkCUDAError("cudaMemcpy from dev_data to idata failed!"); + + cudaFree(dev_data); } /** @@ -31,10 +101,78 @@ namespace StreamCompaction { * @returns The number of elements remaining after compaction. 
*/ int compact(int n, int *odata, const int *idata) { + + int* dev_idata; + int* dev_boolData; + int* dev_indices; + int* dev_odata; + + + int logValue = ilog2ceil(n); + int n_pot = 1 << logValue; + + cudaMalloc((void**)&dev_idata, n_pot * sizeof(int)); + checkCUDAError("cudaMalloc dev_idata failed!"); + + cudaMalloc((void**)&dev_boolData, n_pot * sizeof(int)); + checkCUDAError("cudaMalloc dev_boolData failed!"); + + cudaMalloc((void**)&dev_indices, n_pot * sizeof(int)); + checkCUDAError("cudaMalloc dev_indices failed!"); + + cudaMemset(dev_idata, 0, n_pot * sizeof(int)); + checkCUDAError("cudaMemset dev_idata failed!"); + + cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice); + checkCUDAError("cudaMemcpy from idata to dev_idata failed!"); + timer().startGpuTimer(); - // TODO + + dim3 mapBlocksPerGrid((n_pot + blockSize - 1) / blockSize); + + // Compute temporary array containing 1 and 0 + Common::kernMapToBoolean<<>>(n_pot, dev_boolData, dev_idata); + + cudaMemcpy(dev_indices, dev_boolData, n_pot * sizeof(int), cudaMemcpyDeviceToDevice); + checkCUDAError("cudaMemcpy from dev_boolData to dev_indices failed!"); + + // Run exclusive scan on mapped array + // Up-Sweep + for(int d = 0; d <= ilog2ceil(n) - 1; ++d) + { + dim3 blocksPerGrid((n_pot / pow(2, d + 1) + blockSize - 1) / blockSize); + kernUpSweep<<>>(n_pot, dev_indices, d); + } + + // Down-Sweep + cudaMemset(dev_indices + n_pot - 1, 0, sizeof(int)); + for(int d = ilog2ceil(n) - 1; d >=0; --d) + { + dim3 blocksPerGrid((n_pot / pow(2, d + 1) + blockSize - 1) / blockSize); + kernDownSweep<<>>(n_pot, dev_indices, d); + } + + // Scatter + + int arrayCount = 0; + cudaMemcpy(&arrayCount, dev_indices + n_pot - 1, sizeof(int), cudaMemcpyDeviceToHost); + + cudaMalloc((void**)&dev_odata, arrayCount * sizeof(int)); + checkCUDAError("cudaMalloc dev_odata failed!"); + + Common::kernScatter<<>>(n_pot, dev_odata, dev_idata, dev_boolData, dev_indices); + timer().endGpuTimer(); - return -1; + + 
cudaMemcpy(odata, dev_odata, arrayCount * sizeof(int), cudaMemcpyDeviceToHost); + checkCUDAError("cudaMemcpy from dev_odata to odata failed!"); + + cudaFree(dev_idata); + cudaFree(dev_boolData); + cudaFree(dev_indices); + cudaFree(dev_odata); + + return arrayCount; } } } diff --git a/stream_compaction/naive.cu b/stream_compaction/naive.cu index 4308876..4cb0015 100644 --- a/stream_compaction/naive.cu +++ b/stream_compaction/naive.cu @@ -12,14 +12,68 @@ namespace StreamCompaction { return timer; } // TODO: __global__ + // This performs inclusive scan + __global__ void kernNaiveScan(int d, int n, int *odata, int* idata) + { + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= n) { + return; + } + + int d_offset = 1 << (d - 1); + + if(index >= d_offset) + { + odata[index] = idata[index - d_offset] + idata[index]; + } + else + { + odata[index] = idata[index]; + } + } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { - timer().startGpuTimer(); + // TODO + int *dev_idata, *dev_odata; + + + //dim3 threadsPerBlock(blockSize); + dim3 blocksPerGrid((n + blockSize - 1) / blockSize); + + // CUDA memory management and error checking + cudaMalloc((void**)&dev_idata, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_idata failed!"); + + cudaMalloc((void**)&dev_odata, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_odata failed!"); + + cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice); + checkCUDAError("cudaMemcpy from idata to dev_idata failed!"); + + timer().startGpuTimer(); + + for(int d = 1; d <= ilog2ceil(n); ++d) + { + kernNaiveScan<<>>(d, n, dev_odata, dev_idata); + + int *dev_temp = dev_idata; + dev_idata = dev_odata; + dev_odata = dev_temp; + } + timer().endGpuTimer(); + + // Right shift copy to achieve exclusive scan + odata[0] = 0; + cudaMemcpy(odata + 1, dev_idata, (n - 1) * sizeof(int), cudaMemcpyDeviceToHost); + checkCUDAError("cudaMemcpy from 
dev_idata to odata + 1 failed!"); + + cudaFree(dev_idata); + cudaFree(dev_odata); } } } diff --git a/stream_compaction/thrust.cu b/stream_compaction/thrust.cu index 1def45e..c5ca0c6 100644 --- a/stream_compaction/thrust.cu +++ b/stream_compaction/thrust.cu @@ -18,11 +18,17 @@ namespace StreamCompaction { * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { + thrust::host_vector host_idata(idata, idata + n); + thrust::host_vector host_odata(n); + + thrust::device_vector dev_idata = host_idata; + thrust::device_vector dev_odata = host_odata; + timer().startGpuTimer(); - // TODO use `thrust::exclusive_scan` - // example: for device_vectors dv_in and dv_out: - // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); + thrust::exclusive_scan(dev_idata.begin(), dev_idata.end(), dev_odata.begin()); timer().endGpuTimer(); + + thrust::copy(dev_odata.begin(), dev_odata.end(), odata); } } }