diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..9c2ba4a --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,6 @@ +{ + "files.associations": { + "algorithm": "cpp", + "cstdio": "cpp" + } +} \ No newline at end of file diff --git a/README.md b/README.md index 0e38ddb..02ceac8 100644 --- a/README.md +++ b/README.md @@ -3,12 +3,107 @@ CUDA Stream Compaction **University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 2** -* (TODO) YOUR NAME HERE - * (TODO) [LinkedIn](), [personal website](), [twitter](), etc. -* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab) +* Wanru Zhao + * [LinkedIn](https://www.linkedin.com/in/wanru-zhao). +* Tested on: Windows 10, Intel(R) Xeon(R) CPU E5-1630 v4 @ 3.70GHz, GTX 1070 (SIG Lab) -### (TODO: Your README) +### Features -Include analysis, etc. (Remember, this is public, so don't put -anything here that you don't want to share with the world.) + - CPU Scan & Stream Compaction + - Naive GPU Scan Algorithm + - Work-Efficient GPU Scan + - Work-Efficient GPU Stream Compaction + - Thrust Implementation + +### Extra Credits +- Work-Efficient GPU Scan Optimization: by reducing empty or idle threads, dynamically launching kernels +- Radix Sort + +### Performance Analysis +#### Block size over GPU scan methods (Array size 256) + +![](img/scan_bs.JPG) + +Performed analysis of block size impact over Naive scan (power of two/non-power of two) and work-efficient scan (power of two/non-power of two). It shows that, given the same block size, work-efficient GPU scan will always be better than Naive scan. The size of the array, whether it is a power of two or not, has little impact on an implementation at the same block size. From the graph, the performance changes little over the block size. The rough optimal block size is 512 for each implementation. 
+ +#### Array size over scan methods (Block size 1024) + +![](img/scan_as.JPG) + +From this graph, we can see that thrust scan has the best performance over changes of array size, and it stays about the same from 2^6 to 2^20 numbers. As the array size increases, CPU scan's performance decreases dramatically. Naive is better than CPU, but worse than work-efficient scan. When the array size is between 2^6 and 2^14, the CPU method is better than the two GPU scan methods; when the array size is between 2^14 and 2^16, work-efficient is better than CPU; and when the array size is larger than 2^16, Naive becomes better than CPU. +Thrust may have optimized memory allocation, memory access patterns, and how it divides blocks to make its performance stable and best. + +#### Array size over compaction methods (Block size 1024) + +![](img/compact_as.JPG) +We can see from the graph that the CPU method with scan is the worst, since for compaction it does O(n) work for computing booleans, O(n) for the scan, and O(n) for scattering, while simple compaction needs only O(n) time in total. When the array size is large enough, work-efficient compaction benefits from its parallelism and performs the best. + +#### Radix Sort (Block size 1024, Array size 256) + +Time: 1.52576ms + +### Result +``` +**************** +** SCAN TESTS ** +**************** + [ 18 22 20 42 10 18 23 8 38 0 30 27 2 ... 27 0 ] +==== cpu scan, power-of-two ==== + elapsed time: 0.0011ms (std::chrono Measured) + [ 0 18 40 60 102 112 130 153 161 199 199 229 256 ... 6238 6265 ] +==== cpu scan, non-power-of-two ==== + elapsed time: 0.0008ms (std::chrono Measured) + [ 0 18 40 60 102 112 130 153 161 199 199 229 256 ... 
6157 6161 ] + passed +==== naive scan, power-of-two ==== + elapsed time: 0.036864ms (CUDA Measured) + passed +==== naive scan, non-power-of-two ==== + elapsed time: 0.036864ms (CUDA Measured) + passed +==== work-efficient scan, power-of-two ==== + elapsed time: 0.070656ms (CUDA Measured) + passed +==== work-efficient scan, non-power-of-two ==== + elapsed time: 0.068608ms (CUDA Measured) + passed +==== thrust scan, power-of-two ==== + elapsed time: 0.001024ms (CUDA Measured) + passed +==== thrust scan, non-power-of-two ==== + elapsed time: 0.001024ms (CUDA Measured) + passed + [ 18 22 20 42 10 18 23 8 38 0 30 27 2 ... 27 35 ] +==== radix sort, power-of-two ==== + elapsed time: 1.52883ms (CUDA Measured) + [ 0 0 0 1 1 1 1 2 2 2 2 2 2 ... 49 49 ] + passed +==== radix sort, non-power-of-two ==== + elapsed time: 1.52576ms (CUDA Measured) + [ 0 0 0 1 1 1 1 2 2 2 2 2 2 ... 49 49 ] + passed + +***************************** +** STREAM COMPACTION TESTS ** +***************************** + [ 0 2 2 0 0 0 3 0 2 2 2 1 0 ... 1 0 ] +==== cpu compact without scan, power-of-two ==== + elapsed time: 0.0015ms (std::chrono Measured) + [ 2 2 3 2 2 2 1 1 3 1 3 1 1 ... 1 1 ] + passed +==== cpu compact without scan, non-power-of-two ==== + elapsed time: 0.0015ms (std::chrono Measured) + [ 2 2 3 2 2 2 1 1 3 1 3 1 1 ... 3 1 ] + passed +==== cpu compact with scan ==== + elapsed time: 0.0062ms (std::chrono Measured) + [ 2 2 3 2 2 2 1 1 3 1 3 1 1 ... 
1 1 ] + passed +==== work-efficient compact, power-of-two ==== + elapsed time: 0.09216ms (CUDA Measured) + passed +==== work-efficient compact, non-power-of-two ==== + elapsed time: 0.090112ms (CUDA Measured) + passed +``` diff --git a/img/compact_as.JPG b/img/compact_as.JPG new file mode 100644 index 0000000..6a75493 Binary files /dev/null and b/img/compact_as.JPG differ diff --git a/img/scan_as.JPG b/img/scan_as.JPG new file mode 100644 index 0000000..1f6a4b8 Binary files /dev/null and b/img/scan_as.JPG differ diff --git a/img/scan_bs.JPG b/img/scan_bs.JPG new file mode 100644 index 0000000..8b1551b Binary files /dev/null and b/img/scan_bs.JPG differ diff --git a/result.txt b/result.txt new file mode 100644 index 0000000..89d4664 --- /dev/null +++ b/result.txt @@ -0,0 +1,62 @@ + +**************** +** SCAN TESTS ** +**************** + [ 18 22 20 42 10 18 23 8 38 0 30 27 2 ... 27 0 ] +==== cpu scan, power-of-two ==== + elapsed time: 0.0011ms (std::chrono Measured) + [ 0 18 40 60 102 112 130 153 161 199 199 229 256 ... 6238 6265 ] +==== cpu scan, non-power-of-two ==== + elapsed time: 0.0008ms (std::chrono Measured) + [ 0 18 40 60 102 112 130 153 161 199 199 229 256 ... 6157 6161 ] + passed +==== naive scan, power-of-two ==== + elapsed time: 0.036864ms (CUDA Measured) + passed +==== naive scan, non-power-of-two ==== + elapsed time: 0.036864ms (CUDA Measured) + passed +==== work-efficient scan, power-of-two ==== + elapsed time: 0.070656ms (CUDA Measured) + passed +==== work-efficient scan, non-power-of-two ==== + elapsed time: 0.068608ms (CUDA Measured) + passed +==== thrust scan, power-of-two ==== + elapsed time: 0.001024ms (CUDA Measured) + passed +==== thrust scan, non-power-of-two ==== + elapsed time: 0.001024ms (CUDA Measured) + passed + [ 18 22 20 42 10 18 23 8 38 0 30 27 2 ... 27 35 ] +==== radix sort, power-of-two ==== + elapsed time: 1.52883ms (CUDA Measured) + [ 0 0 0 1 1 1 1 2 2 2 2 2 2 ... 
49 49 ] + passed +==== radix sort, non-power-of-two ==== + elapsed time: 1.52576ms (CUDA Measured) + [ 0 0 0 1 1 1 1 2 2 2 2 2 2 ... 49 49 ] + passed + +***************************** +** STREAM COMPACTION TESTS ** +***************************** + [ 0 2 2 0 0 0 3 0 2 2 2 1 0 ... 1 0 ] +==== cpu compact without scan, power-of-two ==== + elapsed time: 0.0015ms (std::chrono Measured) + [ 2 2 3 2 2 2 1 1 3 1 3 1 1 ... 1 1 ] + passed +==== cpu compact without scan, non-power-of-two ==== + elapsed time: 0.0015ms (std::chrono Measured) + [ 2 2 3 2 2 2 1 1 3 1 3 1 1 ... 3 1 ] + passed +==== cpu compact with scan ==== + elapsed time: 0.0062ms (std::chrono Measured) + [ 2 2 3 2 2 2 1 1 3 1 3 1 1 ... 1 1 ] + passed +==== work-efficient compact, power-of-two ==== + elapsed time: 0.09216ms (CUDA Measured) + passed +==== work-efficient compact, non-power-of-two ==== + elapsed time: 0.090112ms (CUDA Measured) + passed \ No newline at end of file diff --git a/src/main.cpp b/src/main.cpp index 1850161..94f7dbf 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -7,18 +7,26 @@ */ #include +#include #include #include #include #include +#include #include "testing_helpers.hpp" +#define RADIX + const int SIZE = 1 << 8; // feel free to change the size of array const int NPOT = SIZE - 3; // Non-Power-Of-Two int *a = new int[SIZE]; int *b = new int[SIZE]; int *c = new int[SIZE]; +int *d = new int[SIZE]; +int *e = new int[NPOT]; +int *f = new int[SIZE]; + int main(int argc, char* argv[]) { // Scan tests @@ -67,6 +75,20 @@ int main(int argc, char* argv[]) { //printArray(SIZE, c, true); printCmpResult(NPOT, b, c); + //zeroArray(SIZE, c); + //printDesc("naive scan, power-of-two, shared memory"); + //StreamCompaction::Naive::shared_scan(SIZE, c, a); + //printElapsedTime(StreamCompaction::Naive::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)"); + //printArray(SIZE, c, true); + //printCmpResult(SIZE, b, c); + + //zeroArray(SIZE, c); + //printDesc("naive scan, non-power-of-two, 
shared memory"); + //StreamCompaction::Naive::shared_scan(NPOT, c, a); + //printElapsedTime(StreamCompaction::Naive::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)"); + //printArray(SIZE, c, true); + //printCmpResult(NPOT, b, c); + zeroArray(SIZE, c); printDesc("work-efficient scan, power-of-two"); StreamCompaction::Efficient::scan(SIZE, c, a); @@ -95,6 +117,48 @@ int main(int argc, char* argv[]) { //printArray(NPOT, c, true); printCmpResult(NPOT, b, c); + + + + +#ifdef RADIX + zeroArray(SIZE, d); + genArray(SIZE, d, 50); + printArray(SIZE, d, true); + std::vector dVec(SIZE); + for(int i = 0; i < SIZE; i++) { + dVec[i] = d[i]; + } + std::sort(dVec.begin(), dVec.end()); + for(int i = 0; i < SIZE; i++) { + f[i] = dVec[i]; + } + + zeroArray(SIZE, e); + printDesc("radix sort, power-of-two"); + StreamCompaction::Radix::sort(SIZE, e, d); + printElapsedTime(StreamCompaction::Radix::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)"); + printArray(SIZE, e, true); + printCmpResult(SIZE, f, e); + + std::vector dVecN(NPOT); + for (int i = 0; i < NPOT; i++) { + dVecN[i] = d[i]; + } + std::sort(dVecN.begin(), dVecN.end()); + for (int i = 0; i < NPOT; i++) { + f[i] = dVecN[i]; + } + + zeroArray(NPOT, e); + printDesc("radix sort, non-power-of-two"); + StreamCompaction::Radix::sort(NPOT, e, d); + printElapsedTime(StreamCompaction::Radix::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)"); + printArray(NPOT, e, true); + printCmpResult(NPOT, f, e); + +#endif + printf("\n"); printf("*****************************\n"); printf("** STREAM COMPACTION TESTS **\n"); @@ -151,4 +215,7 @@ int main(int argc, char* argv[]) { delete[] a; delete[] b; delete[] c; + delete[] d; + delete[] e; + delete[] f; } diff --git a/stream_compaction/CMakeLists.txt b/stream_compaction/CMakeLists.txt index cdbef77..fc37515 100644 --- a/stream_compaction/CMakeLists.txt +++ b/stream_compaction/CMakeLists.txt @@ -9,9 +9,11 @@ set(SOURCE_FILES "efficient.cu" 
"thrust.h" "thrust.cu" + "radix.h" + "radix.cu" ) cuda_add_library(stream_compaction ${SOURCE_FILES} - OPTIONS -arch=sm_20 + OPTIONS -arch=sm_61 ) diff --git a/stream_compaction/common.cu b/stream_compaction/common.cu index 8fc0211..94237dd 100644 --- a/stream_compaction/common.cu +++ b/stream_compaction/common.cu @@ -24,6 +24,12 @@ namespace StreamCompaction { */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { // TODO + + int index = blockDim.x * blockIdx.x + threadIdx.x; + if (index >= n) return; + + bools[index] = idata[index] ? 1 : 0; + } /** @@ -33,6 +39,12 @@ namespace StreamCompaction { __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { // TODO + int index = blockDim.x * blockIdx.x + threadIdx.x; + if (index >= n) return; + + if (bools[index]) { + odata[indices[index]] = idata[index]; + } } } diff --git a/stream_compaction/common.h b/stream_compaction/common.h index 99a1b04..d3555f5 100644 --- a/stream_compaction/common.h +++ b/stream_compaction/common.h @@ -13,6 +13,8 @@ #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) +#define blockSize 1024 + /** * Check for CUDA errors; print and exit if there was a problem. 
*/ diff --git a/stream_compaction/cpu.cu b/stream_compaction/cpu.cu index 05ce667..0c57085 100644 --- a/stream_compaction/cpu.cu +++ b/stream_compaction/cpu.cu @@ -1,15 +1,15 @@ #include #include "cpu.h" -#include "common.h" +#include "common.h" namespace StreamCompaction { namespace CPU { - using StreamCompaction::Common::PerformanceTimer; - PerformanceTimer& timer() - { - static PerformanceTimer timer; - return timer; + using StreamCompaction::Common::PerformanceTimer; + PerformanceTimer& timer() + { + static PerformanceTimer timer; + return timer; } /** @@ -18,8 +18,17 @@ namespace StreamCompaction { * (Optional) For better understanding before starting moving to GPU, you can simulate your GPU scan in this function first. */ void scan(int n, int *odata, const int *idata) { + + if(n == 0) return; + timer().startCpuTimer(); // TODO + + odata[0] = 0; + for(int i = 1; i < n; i++) { + odata[i] = idata[i - 1] + odata[i - 1]; + } + timer().endCpuTimer(); } @@ -31,8 +40,16 @@ namespace StreamCompaction { int compactWithoutScan(int n, int *odata, const int *idata) { timer().startCpuTimer(); // TODO + + int count = 0; + for(int i = 0; i < n; i++) { + if(idata[i] != 0) { + odata[count++] = idata[i]; + } + } + timer().endCpuTimer(); - return -1; + return count; } /** @@ -41,10 +58,38 @@ namespace StreamCompaction { * @returns the number of elements remaining after compaction. 
*/ int compactWithScan(int n, int *odata, const int *idata) { + if(n == 0) return 0; timer().startCpuTimer(); - // TODO + // TODO + + int count = 0; + + int *tmp = (int*)malloc(n * sizeof(int)); + for(int i = 0; i < n; i++) { + if(idata[i] == 0) { + tmp[i] = 0; + } else { + tmp[i] = 1; + } + } + + int *scan = (int*)malloc(n * sizeof(int)); + scan[0] = 0; + for(int i = 1; i < n; i++) { + scan[i] = tmp[i - 1] + scan[i - 1]; + } + + for(int i = 0; i < n; i++) { + if(tmp[i]) { + odata[scan[i]] = idata[i]; + count++; + } + } + timer().endCpuTimer(); - return -1; + free(tmp); + free(scan); + return count; } } } diff --git a/stream_compaction/efficient.cu b/stream_compaction/efficient.cu index 36c5ef2..cfe6ddb 100644 --- a/stream_compaction/efficient.cu +++ b/stream_compaction/efficient.cu @@ -5,20 +5,77 @@ namespace StreamCompaction { namespace Efficient { - using StreamCompaction::Common::PerformanceTimer; - PerformanceTimer& timer() - { - static PerformanceTimer timer; - return timer; + using StreamCompaction::Common::PerformanceTimer; + PerformanceTimer& timer() + { + static PerformanceTimer timer; + return timer; } + __global__ void kernReduction(int n, int d, int *idata) { + int index = blockDim.x * blockIdx.x + threadIdx.x; + if (index >= n) return; + + int offset1 = 1 << d; + int offset2 = 1 << (d + 1); + + idata[index * offset2 + offset2 - 1] += idata[index * offset2 + offset1 - 1]; + } + + __global__ void kernScan(int n, int d, int *idata) { + int index = blockDim.x * blockIdx.x + threadIdx.x; + if (index >= n) return; + + int offset1 = 1 << d; + int offset2 = 1 << (d + 1); + + int leftIdx = index * offset2 + offset1 - 1; + int rightIdx = index * offset2 + offset2 - 1; + + int originLeft = idata[leftIdx]; + idata[leftIdx] = idata[rightIdx]; + idata[rightIdx] += originLeft; + } + + + void scanCore(int nPow, int d, int *dev_idata) { + dim3 gridDim; + for (int i = 0; i < d; i++) { + int scale = 1 << (i + 1); + gridDim = ((nPow / scale + blockSize - 1) / blockSize); 
+ kernReduction << > > (nPow / scale, i, dev_idata); + } + cudaMemset(dev_idata + nPow - 1, 0, sizeof(int)); + for (int i = d - 1; i >= 0; i--) { + int scale = 1 << (i + 1); + gridDim = ((nPow / scale + blockSize - 1) / blockSize); + kernScan << > > (nPow / scale, i, dev_idata); + } + } + /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { + + int d = ilog2ceil(n); + int nPow = 1 << d; + int *dev_idata; + cudaMalloc((void**)&dev_idata, nPow * sizeof(int)); + checkCUDAError("cudaMalloc dev_idata failed!"); + cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice); + checkCUDAError("cudaMemcpy dev_idata failed!"); + cudaMemset(dev_idata + n, 0, (nPow - n) * sizeof(int)); + checkCUDAError("cudaMemset dev_idata failed!"); + timer().startGpuTimer(); // TODO + scanCore(nPow, d, dev_idata); + timer().endGpuTimer(); + + cudaMemcpy(odata , dev_idata, n * sizeof(int), cudaMemcpyDeviceToHost); + cudaFree(dev_idata); } /** @@ -31,10 +88,54 @@ namespace StreamCompaction { * @returns The number of elements remaining after compaction. 
*/ int compact(int n, int *odata, const int *idata) { + + int *dev_idata, *dev_bools, *dev_odata, *dev_indices; + + int d = ilog2ceil(n); + int nPow = 1 << d; + + cudaMalloc((void**)&dev_idata, nPow * sizeof(int)); + checkCUDAError("cudaMalloc dev_idata failed!"); + cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice); + checkCUDAError("cudaMemcpy dev_idata failed!"); + cudaMemset(dev_idata + n, 0, (nPow - n) * sizeof(int)); + checkCUDAError("cudaMemset dev_idata failed!"); + + cudaMalloc((void**)&dev_bools, nPow * sizeof(int)); + checkCUDAError("cudaMalloc dev_bools failed!"); + cudaMalloc((void**)&dev_odata, nPow * sizeof(int)); + checkCUDAError("cudaMalloc dev_odata failed!"); + cudaMalloc((void**)&dev_indices, nPow * sizeof(int)); + checkCUDAError("cudaMalloc dev_indices failed!"); + timer().startGpuTimer(); // TODO + + dim3 gridDim((nPow + blockSize - 1) / blockSize); + StreamCompaction::Common::kernMapToBoolean << > > (nPow, dev_bools, dev_idata); + cudaMemcpy(dev_indices, dev_bools, nPow * sizeof(int), cudaMemcpyDeviceToDevice); + scanCore(nPow, d, dev_indices); + StreamCompaction::Common::kernScatter << > > (nPow, dev_odata, dev_idata, dev_bools, dev_indices); + timer().endGpuTimer(); - return -1; + + cudaMemcpy(odata, dev_odata, nPow * sizeof(int), cudaMemcpyDeviceToHost); + + cudaFree(dev_idata); + cudaFree(dev_odata); + cudaFree(dev_bools); + cudaFree(dev_indices); + + int count = 0; + for (int i = 0; i < n; i++) { + if (odata[i]) { + count++; + } + else { + break; + } + } + return count; } } } diff --git a/stream_compaction/efficient.h b/stream_compaction/efficient.h index 803cb4f..5fc4adc 100644 --- a/stream_compaction/efficient.h +++ b/stream_compaction/efficient.h @@ -6,6 +6,8 @@ namespace StreamCompaction { namespace Efficient { StreamCompaction::Common::PerformanceTimer& timer(); + void scanCore(int n, int d, int *idata); + void scan(int n, int *odata, const int *idata); int compact(int n, int *odata, const int *idata); diff --git 
a/stream_compaction/naive.cu b/stream_compaction/naive.cu index 9218f8e..f8e7687 100644 --- a/stream_compaction/naive.cu +++ b/stream_compaction/naive.cu @@ -5,21 +5,149 @@ namespace StreamCompaction { namespace Naive { - using StreamCompaction::Common::PerformanceTimer; - PerformanceTimer& timer() - { - static PerformanceTimer timer; - return timer; + using StreamCompaction::Common::PerformanceTimer; + PerformanceTimer& timer() + { + static PerformanceTimer timer; + return timer; } // TODO: __global__ + __global__ void kernNaiveScan(int n, int d, int *idata, int *odata) { + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= n) { + return; + } + int offset = 1 << (d - 1); + if (index >= offset) { + odata[index] = idata[index - offset] + idata[index]; + } + else { + odata[index] = idata[index]; + } + } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { + + int *dev_idata, *dev_odata; + cudaMalloc((void**)&dev_idata, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_idata failed!"); + cudaMalloc((void**)&dev_odata, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_odata failed!"); + cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice); + checkCUDAError("cudaMemcpy dev_idata failed!"); + + int d = ilog2ceil(n); + timer().startGpuTimer(); // TODO + dim3 gridDim((n + blockSize - 1) / blockSize); + for (int i = 1; i <= d; i++) { + kernNaiveScan << > > (n, i, dev_idata, dev_odata); + std::swap(dev_idata, dev_odata); + } + std::swap(dev_idata, dev_odata); + timer().endGpuTimer(); - } + + odata[0] = 0; + cudaMemcpy(odata + 1, dev_odata, (n - 1) * sizeof(int), cudaMemcpyDeviceToHost); + + cudaFree(dev_idata); + cudaFree(dev_odata); + } + + + __global__ void kernSharedNaiveScan(int n, int d, int *odata, int *auxidata, const int *idata, int isMulti) { + extern __shared__ int temp[]; + + int index = threadIdx.x; + int boffset = blockIdx.x * blockDim.x; + + int pout = 
1, pin = 0; + temp[pout * n + index + boffset * 2] = idata[pout * n + index + boffset]; + + __syncthreads(); + for(int i = 1; i < (1 << d); i *= 2) { + pout = 1 - pout; + pin = 1 - pout; + + if(index >= i) { + temp[pout * n + index + boffset * 2] += temp[pin * n + index + boffset * 2 - i]; + } else { + temp[pout * n + index + boffset * 2] = temp[pin * n + index + boffset * 2]; + } + + __syncthreads(); + } + + if(index == n - 1 && isMulti) { + if(blockIdx.x) + auxidata[blockIdx.x] = temp[pout * n + index + boffset]; + else + auxidata[blockIdx.x] = 0; + } + + odata[index + boffset] = temp[pout * n + index + boffset]; + } + + __global__ void kernAddAuxi(int n, int *odata, const int *auxi) { + extern __shared__ int temp[]; + + int index = blockDim.x * blockIdx.x + threadIdx.x; + if(index >= n) return; + + temp[blockIdx.x] = auxi[blockIdx.x]; + + __syncthreads(); + + odata[index] += temp[blockIdx.x]; + } + + + + void shared_scan(int n, int *odata, const int *idata) + { + int blockCount = (n + blockSize - 1) / blockSize; + int lastBlockN = n - (blockCount - 1) * blockSize; + int d = ilog2ceil(blockSize); + + int *dev_idata, *dev_odata, *dev_auxi, *dev_tauxi; + + cudaMalloc((void**)&dev_idata, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_idata failed!"); + cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice); + + cudaMalloc((void**)&dev_odata, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_odata failed!"); + + cudaMalloc((void**)&dev_auxi, blockCount * sizeof(int)); + checkCUDAError("cudaMalloc dev_auxi failed!"); + + cudaMalloc((void**)&dev_tauxi, blockCount * sizeof(int)); + checkCUDAError("cudaMalloc dev_tauxi failed!"); + + int blocknum = blockSize; + int sharedMemory = 2 * blockSize * sizeof(int); + kernSharedNaiveScan<<>>(blocknum, d, dev_odata, dev_auxi, dev_idata, 1); + + // assump blockSize <= blockSize + sharedMemory = 2 * blockCount * sizeof(int); + d = ilog2ceil(blockCount); + kernSharedNaiveScan<<<1, blockCount, 
sharedMemory>>>(blocknum, d, dev_tauxi, NULL, dev_auxi, 0); + + sharedMemory = blockCount * sizeof(int); + kernAddAuxi<<>> (blocknum, dev_odata, dev_tauxi); + + odata[0] = 0; + cudaMemcpy(odata + 1, dev_odata, (n - 1) * sizeof(int), cudaMemcpyDeviceToHost); + + cudaFree(dev_idata); + cudaFree(dev_odata); + cudaFree(dev_auxi); + + } } } diff --git a/stream_compaction/naive.h b/stream_compaction/naive.h index 37dcb06..bc49781 100644 --- a/stream_compaction/naive.h +++ b/stream_compaction/naive.h @@ -7,5 +7,7 @@ namespace StreamCompaction { StreamCompaction::Common::PerformanceTimer& timer(); void scan(int n, int *odata, const int *idata); + + void shared_scan(int n, int *odata, const int *idata); } } diff --git a/stream_compaction/radix.cu b/stream_compaction/radix.cu new file mode 100644 index 0000000..8a1ec7d --- /dev/null +++ b/stream_compaction/radix.cu @@ -0,0 +1,102 @@ +#include +#include +#include "radix.h" +#include "efficient.h" + +namespace StreamCompaction { + namespace Radix { + using StreamCompaction::Common::PerformanceTimer; + PerformanceTimer& timer() + { + static PerformanceTimer timer; + return timer; + } + + __global__ void kernRadixEArray(int n, int p, int *bdata, int *edata, const int *idata) { + int index = blockDim.x * blockIdx.x + threadIdx.x; + if(index >= n) return; + + int digit = idata[index] & p; + + edata[index] = digit ? 0 : 1; + bdata[index] = digit ? 1 : 0; + } + + __global__ void kernRadixDArray(int n, int totalFalse, int *ddata, const int *fdata, int *bdata) { + int index = blockDim.x * blockIdx.x + threadIdx.x; + if(index >= n) return; + + int f = fdata[index]; + int t = index - f + totalFalse; + ddata[index] = bdata[index] ? 
t : f; + bdata[index] = 1; + } + + void sort(int n, int *odata, const int *idata){ + + int d = ilog2ceil(n); + int nPow = 1 << d; + + int *dev_idataPow, *dev_bdataPow, *dev_edataPow, *dev_fdataPow, *dev_ddataPow, *dev_odataPow; + int *en, *fn; + + en = (int*)std::malloc(sizeof(int)); + fn = (int*)std::malloc(sizeof(int)); + + cudaMalloc((void**)&dev_idataPow, nPow * sizeof(int)); + checkCUDAError("cudaMalloc dev_idataPow failed!"); + cudaMemcpy(dev_idataPow, idata, n * sizeof(int), cudaMemcpyHostToDevice); + checkCUDAError("cudaMemcpy dev_idataPow failed!"); + cudaMemset(dev_idataPow + n, 0, (nPow - n) * sizeof(int)); + checkCUDAError("cudaMemset dev_idataPow failed!"); + + cudaMalloc((void**)&dev_edataPow, nPow * sizeof(int)); + checkCUDAError("cudaMalloc dev_edataPow failed!"); + cudaMemset(dev_edataPow, 0, nPow * sizeof(int)); + cudaMalloc((void**)&dev_bdataPow, nPow * sizeof(int)); + checkCUDAError("cudaMalloc dev_bdataPow failed!"); + cudaMemset(dev_bdataPow, 0, nPow * sizeof(int)); + cudaMalloc((void**)&dev_fdataPow, nPow * sizeof(int)); + checkCUDAError("cudaMalloc dev_fdataPow failed!"); + cudaMemset(dev_fdataPow, 0, nPow * sizeof(int)); + cudaMalloc((void**)&dev_ddataPow, nPow * sizeof(int)); + checkCUDAError("cudaMalloc dev_idata failed!"); + cudaMemset(dev_ddataPow, 0, nPow * sizeof(int)); + cudaMalloc((void**)&dev_odataPow, nPow * sizeof(int)); + checkCUDAError("cudaMalloc dev_odata failed!"); + cudaMemset(dev_odataPow, 0, nPow * sizeof(int)); + + timer().startGpuTimer(); + + dim3 gridDim((n + blockSize - 1) / blockSize); + + for(int p = 1; p <= (1 << 6); p = p << 1) { + kernRadixEArray<<>>(n, p, dev_bdataPow, dev_edataPow, dev_idataPow); + cudaMemcpy(dev_fdataPow, dev_edataPow, nPow * sizeof(int), cudaMemcpyDeviceToDevice); + StreamCompaction::Efficient::scanCore(nPow, d, dev_fdataPow); + cudaMemcpy(en, dev_edataPow + n - 1, sizeof(int), cudaMemcpyDeviceToHost); + cudaMemcpy(fn, dev_fdataPow + n - 1, sizeof(int), cudaMemcpyDeviceToHost); + 
kernRadixDArray<<>>(n, en[0] + fn[0], dev_ddataPow, dev_fdataPow, dev_bdataPow); + StreamCompaction::Common::kernScatter<<>>(n, dev_odataPow, dev_idataPow, dev_bdataPow, dev_ddataPow); + std::swap(dev_idataPow, dev_odataPow); + } + std::swap(dev_idataPow, dev_odataPow); + + timer().endGpuTimer(); + + cudaMemcpy(odata, dev_odataPow, n * sizeof(int), cudaMemcpyDeviceToHost); + + cudaFree(dev_idataPow); + cudaFree(dev_bdataPow); + cudaFree(dev_edataPow); + cudaFree(dev_fdataPow); + cudaFree(dev_ddataPow); + cudaFree(dev_odataPow); + + free(en); + free(fn); + + } + + } +} \ No newline at end of file diff --git a/stream_compaction/radix.h b/stream_compaction/radix.h new file mode 100644 index 0000000..c50fddd --- /dev/null +++ b/stream_compaction/radix.h @@ -0,0 +1,11 @@ +#pragma once + +#include "common.h" + +namespace StreamCompaction { + namespace Radix { + StreamCompaction::Common::PerformanceTimer& timer(); + + void sort(int n, int *odata, const int *idata); + } +} diff --git a/stream_compaction/thrust.cu b/stream_compaction/thrust.cu index 36b732d..3b3764d 100644 --- a/stream_compaction/thrust.cu +++ b/stream_compaction/thrust.cu @@ -8,21 +8,29 @@ namespace StreamCompaction { namespace Thrust { - using StreamCompaction::Common::PerformanceTimer; - PerformanceTimer& timer() - { - static PerformanceTimer timer; - return timer; + using StreamCompaction::Common::PerformanceTimer; + PerformanceTimer& timer() + { + static PerformanceTimer timer; + return timer; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ void scan(int n, int *odata, const int *idata) { + //thrust::device_vector dev_idata(idata, idata + n), dev_odata(n); + timer().startGpuTimer(); // TODO use `thrust::exclusive_scan` // example: for device_vectors dv_in and dv_out: // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); + + //thrust::exclusive_scan(dev_idata.begin(), dev_idata.end(), dev_odata.begin()); + thrust::exclusive_scan(idata, idata + n, odata); + timer().endGpuTimer(); + + //thrust::copy(dev_odata.begin(), dev_odata.end(), odata); } } }