diff --git a/README.md b/README.md
index 0e38ddb..e8bd4f0 100644
--- a/README.md
+++ b/README.md
@@ -3,12 +3,125 @@ CUDA Stream Compaction
 
 **University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 2**
 
-* (TODO) YOUR NAME HERE
-  * (TODO) [LinkedIn](), [personal website](), [twitter](), etc.
-* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab)
+* Name: Bowen Yang
+  * [LinkedIn](https://www.linkedin.com/in/%E5%8D%9A%E6%96%87-%E6%9D%A8-83bba6148)
+  * [GitHub](https://github.com/Grillnov)
+  * [Facebook](https://www.facebook.com/yang.bowen.7399)
+  * [Steam](https://steamcommunity.com/id/grillnov)
+* Tested on: Windows 10 x64, i7-6800K @ 3.40GHz, 32GB, GTX 1080 8GB (personal computer at home)
 
-### (TODO: Your README)
+**Description**
 
-Include analysis, etc. (Remember, this is public, so don't put
-anything here that you don't want to share with the world.)
+Stream compaction from scratch. In our case we filter out every element that equals 0; for example, compacting `[3 0 1 0 2]` yields `[3 1 2]`. This is analogous to removing the dead paths from the list of rays in a path tracer.
+
+*Part 1: CPU implementation*
+*Part 2: Brute-force naive scan on the GPU*
+*Part 3: Work-efficient implementation that's actually not so efficient*
+*Part 4: Thrust*
+
+**Issues**
+
+As usual, I changed the `sm_20` architecture option to `sm_60` so that the project compiles with nvcc 9.2.
+
+**Performance Test**
+
+**Scan**
+
+*When the element count is a power of two*
+
+![](img/2Power.png)
+
+*When the element count is not a power of two*
+
+![](img/n2power.png)
+
+**Compaction**
+
+*When the element count is a power of two*
+
+![](img/compact2Power.png)
+
+*When the element count is not a power of two*
+
+![](img/compactn2power.png)
+
+**Thrust implementation**
+
+*When the element count is a power of two*
+
+![](img/thrust2power.png)
+
+*When the element count is not a power of two*
+
+![](img/thrustn2power.png)
+
+**The results**
+
+```
+****************
+** SCAN TESTS **
+****************
+    [ 34 23 45 34 24 43 35 44 26 22 13 28 37 ... 47 0 ]
+==== cpu scan, power-of-two ====
+   elapsed time: 0.001204ms    (std::chrono Measured)
+    [ 0 34 57 102 136 160 203 238 282 308 330 343 371 ... 6116 6163 ]
+==== cpu scan, non-power-of-two ====
+   elapsed time: 0.001205ms    (std::chrono Measured)
+    [ 0 34 57 102 136 160 203 238 282 308 330 343 371 ... 6064 6085 ]
+    passed
+==== naive scan, power-of-two ====
+   elapsed time: 0.033792ms    (CUDA Measured)
+    passed
+==== naive scan, non-power-of-two ====
+   elapsed time: 0.032896ms    (CUDA Measured)
+    passed
+==== work-efficient scan, power-of-two ====
+   elapsed time: 0.123904ms    (CUDA Measured)
+    passed
+==== work-efficient scan, non-power-of-two ====
+   elapsed time: 0.126976ms    (CUDA Measured)
+    passed
+==== thrust scan, power-of-two ====
+   elapsed time: 13.1891ms    (CUDA Measured)
+    passed
+==== thrust scan, non-power-of-two ====
+   elapsed time: 0.96256ms    (CUDA Measured)
+    passed
+
+*****************************
+** STREAM COMPACTION TESTS **
+*****************************
+    [ 0 3 3 0 1 2 0 2 2 2 1 2 2 ... 3 0 ]
+==== cpu compact without scan, power-of-two ====
+   elapsed time: 0.001506ms    (std::chrono Measured)
+    [ 3 3 1 2 2 2 2 1 2 2 3 2 1 ... 3 3 ]
+    passed
+==== cpu compact without scan, non-power-of-two ====
+   elapsed time: 0.002108ms    (std::chrono Measured)
+    [ 3 3 1 2 2 2 2 1 2 2 3 2 1 ... 2 3 ]
+    passed
+==== cpu compact with scan ====
+   elapsed time: 0.003313ms    (std::chrono Measured)
+    [ 3 3 1 2 2 2 2 1 2 2 3 2 1 ... 3 3 ]
+    passed
+==== work-efficient compact, power-of-two ====
+   elapsed time: 0.23984ms    (CUDA Measured)
+    passed
+==== work-efficient compact, non-power-of-two ====
+   elapsed time: 0.238592ms    (CUDA Measured)
+    passed
+```
+
+**Why the work-efficient implementation is not actually efficient**
+
+From the charts we find, surprisingly, that the *efficient* implementation is actually not that efficient. There are three main reasons for this:
+
+**I/O intensive**
+In the efficient implementation we have to write the initial zero into the GPU buffer and read the last element of the buffer back in the middle of the algorithm. These extra small host-device transfers force synchronization and hurt performance.
+
+**The reduce algorithm itself**
+As the sweep moves to deeper layers, the stride keeps doubling, which is exactly the access pattern caches hate: spatial locality becomes terrible in the deep layers.
+
+**Thread scheduling**
+As the sweep moves to deeper layers, more and more threads become idle and do nothing useful; even worse, as the stride grows, branch divergence inside warps becomes unacceptable.
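+
+**A possible remedy (not part of this submission)**
+
+A minimal sketch of the usual fix for the last two points, assuming the same `BLOCKSIZE` and the same in-place buffer used above. `upSweepCompacted` is a hypothetical kernel written only for illustration, not code from this repository: instead of launching one thread per element and masking most of them out with `idx % (2 * stride) == 0`, launch only the threads that actually have work at the current depth and let thread `t` own the `t`-th active node. Neighboring threads then stay active together, so the modulo test and most of the in-warp divergence disappear.
+
+```
+// Hypothetical divergence-reduced up-sweep step: one thread per active node.
+__global__ void upSweepCompacted(int activeNodes, int stride, int *data)
+{
+    int t = threadIdx.x + blockIdx.x * blockDim.x;
+    if (t >= activeNodes)
+    {
+        return;
+    }
+    int stride2times = stride * 2;
+    int left = t * stride2times; // first element of this node's range
+    data[left + stride2times - 1] += data[left + stride - 1];
+}
+
+// Host side, per layer (elementNumber is the padded, power-of-two size):
+// int activeNodes = elementNumber / (2 * stride);
+// dim3 grid((activeNodes + BLOCKSIZE - 1) / BLOCKSIZE);
+// upSweepCompacted<<<grid, BLOCKSIZE>>>(activeNodes, stride, paddedIDataDev);
+```
+
+The down-sweep could be re-indexed the same way. This does not fix the first point or the cache behaviour (the strides still double every layer), but the idle threads and most of the warp divergence go away; writing the initial zero with `cudaMemset(paddedIDataDev + elementNumber - 1, 0, sizeof(int))` would also avoid one small host-to-device copy.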
diff --git a/img/2Power.png b/img/2Power.png
new file mode 100644
index 0000000..ba8c513
Binary files /dev/null and b/img/2Power.png differ
diff --git a/img/compact2Power.png b/img/compact2Power.png
new file mode 100644
index 0000000..cfba4bb
Binary files /dev/null and b/img/compact2Power.png differ
diff --git a/img/compactn2power.png b/img/compactn2power.png
new file mode 100644
index 0000000..f774ddb
Binary files /dev/null and b/img/compactn2power.png differ
diff --git a/img/n2power.png b/img/n2power.png
new file mode 100644
index 0000000..ba8c513
Binary files /dev/null and b/img/n2power.png differ
diff --git a/img/thrust2power.png b/img/thrust2power.png
new file mode 100644
index 0000000..6c03e6f
Binary files /dev/null and b/img/thrust2power.png differ
diff --git a/img/thrustn2power.png b/img/thrustn2power.png
new file mode 100644
index 0000000..3ce3e64
Binary files /dev/null and b/img/thrustn2power.png differ
diff --git a/stream_compaction/CMakeLists.txt b/stream_compaction/CMakeLists.txt
index cdbef77..48e2f35 100644
--- a/stream_compaction/CMakeLists.txt
+++ b/stream_compaction/CMakeLists.txt
@@ -13,5 +13,5 @@ set(SOURCE_FILES
 
 cuda_add_library(stream_compaction
     ${SOURCE_FILES}
-    OPTIONS -arch=sm_20
+    OPTIONS -arch=sm_60
     )
diff --git a/stream_compaction/common.cu b/stream_compaction/common.cu
index 8fc0211..7328d28 100644
--- a/stream_compaction/common.cu
+++ b/stream_compaction/common.cu
@@ -24,6 +24,20 @@ namespace StreamCompaction {
          */
         __global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
             // TODO
+            int idx = threadIdx.x + blockIdx.x * blockDim.x;
+            if (idx >= n)
+            {
+                return;
+            }
+
+            // Elements equal to 0 map to 0 (dropped); everything else maps to 1 (kept)
+            if (idata[idx] == MEETCRITERION)
+            {
+                bools[idx] = HAS_MET;
+            }
+            else
+            {
+                bools[idx] = NOT_MET;
+            }
         }
 
         /**
@@ -33,7 +47,20 @@ namespace StreamCompaction {
         __global__ void kernScatter(int n, int *odata,
                 const int *idata, const int *bools, const int *indices) {
             // TODO
-        }
+            int idx = threadIdx.x + blockIdx.x * blockDim.x;
+            if (idx >= n)
+            {
+                return;
+            }
+            // The exclusive-scan value is the scatter address; an element is kept
+            // when that value increases at idx. The last element has no successor
+            // to compare against, so it is written unconditionally.
+            if (idx == n - 1)
+            {
+                odata[indices[idx]] = idata[idx];
+            }
+            else if (indices[idx] != indices[idx + 1])
+            {
+                odata[indices[idx]] = idata[idx];
+            }
+        }
 
     }
 }
diff --git a/stream_compaction/common.h b/stream_compaction/common.h
index 99a1b04..35403e8 100644
--- a/stream_compaction/common.h
+++ b/stream_compaction/common.h
@@ -130,3 +130,9 @@ namespace StreamCompaction {
     };
     }
 }
+
+// Boolean attribute indicating whether an element meets the removal criterion:
+// 1 for not met (the element is kept), 0 for met (the element is dropped)
+enum ElementAttribute { HAS_MET, NOT_MET };
+#define MEETCRITERION 0
+#define BLOCKSIZE 1024
\ No newline at end of file
diff --git a/stream_compaction/cpu.cu b/stream_compaction/cpu.cu
index 05ce667..ecc4e13 100644
--- a/stream_compaction/cpu.cu
+++ b/stream_compaction/cpu.cu
@@ -1,15 +1,15 @@
 #include <cstdio>
 #include "cpu.h"
 
-#include "common.h"
+#include "common.h"
 
 namespace StreamCompaction {
     namespace CPU {
-        using StreamCompaction::Common::PerformanceTimer;
-        PerformanceTimer& timer()
-        {
-            static PerformanceTimer timer;
-            return timer;
+        using StreamCompaction::Common::PerformanceTimer;
+        PerformanceTimer& timer()
+        {
+            static PerformanceTimer timer;
+            return timer;
         }
 
         /**
@@ -20,6 +20,14 @@ namespace StreamCompaction {
         void scan(int n, int *odata, const int *idata) {
             timer().startCpuTimer();
             // TODO
+            // Insert the identity: an exclusive scan starts with 0
+            odata[0] = 0;
+            // Accumulate the prefix sums
+            for (int i = 1; i != n; ++i)
+            {
+                odata[i] = idata[i - 1] + odata[i - 1];
+            }
+
             timer().endCpuTimer();
         }
 
@@ -31,8 +39,18 @@ namespace StreamCompaction {
         int compactWithoutScan(int n, int *odata, const int *idata) {
             timer().startCpuTimer();
             // TODO
+            int remaining = 0;
+            for (int i = 0; i != n; ++i)
+            {
+                if (idata[i] != 0)
+                {
+                    odata[remaining] = idata[i];
+                    ++remaining;
+                }
+            }
+
             timer().endCpuTimer();
-            return -1;
+            return remaining;
         }
 
         /**
@@ -41,10 +59,46 @@ namespace StreamCompaction {
          * @returns the number of elements remaining after compaction.
          */
         int compactWithScan(int n, int *odata, const int *idata) {
-            timer().startCpuTimer();
+
+            // Scratch buffers for the keep/drop flags and their exclusive scan
+            int* hasChanged = new int[n];
+            int* sum = new int[n];
+
+            timer().startCpuTimer();
             // TODO
+
+            // Map: 1 for elements to keep, 0 for elements to drop
+            for (int i = 0; i != n; ++i)
+            {
+                if (idata[i] == MEETCRITERION)
+                {
+                    hasChanged[i] = HAS_MET;
+                }
+                else
+                {
+                    hasChanged[i] = NOT_MET;
+                }
+            }
+
+            // Exclusive scan of the flags gives each kept element its output index
+            sum[0] = 0;
+            for (int i = 1; i != n; ++i)
+            {
+                sum[i] = hasChanged[i - 1] + sum[i - 1];
+            }
+            // Scatter the kept elements to their output positions
+            int remaining = 0;
+            for (int i = 0; i < n; ++i)
+            {
+                if (hasChanged[i] == NOT_MET)
+                {
+                    odata[sum[i]] = idata[i];
+                    ++remaining;
+                }
+            }
+
             timer().endCpuTimer();
-            return -1;
+
+            delete[] hasChanged;
+            delete[] sum;
+
+            return remaining;
         }
     }
 }
diff --git a/stream_compaction/efficient.cu b/stream_compaction/efficient.cu
index 36c5ef2..73276c8 100644
--- a/stream_compaction/efficient.cu
+++ b/stream_compaction/efficient.cu
@@ -5,20 +5,120 @@
 
 namespace StreamCompaction {
     namespace Efficient {
-        using StreamCompaction::Common::PerformanceTimer;
-        PerformanceTimer& timer()
-        {
-            static PerformanceTimer timer;
-            return timer;
+        using StreamCompaction::Common::PerformanceTimer;
+        PerformanceTimer& timer()
+        {
+            static PerformanceTimer timer;
+            return timer;
         }
 
+        // No longer a const int *idata: this time the sweep works in place
+        __global__ void upSweeping(int n, int stride, int *idata)
+        {
+            int idx = threadIdx.x + blockIdx.x * blockDim.x;
+            if (idx >= n)
+            {
+                return;
+            }
+
+            // Only threads whose index is a multiple of 2 * stride do work at this depth
+            int stride2times = stride * 2;
+            if (idx % stride2times == 0)
+            {
+                idata[idx + stride2times - 1] += idata[idx + stride - 1];
+            }
+        }
+        __global__ void downSweeping(int n, int stride, int *idata)
+        {
+            int idx = threadIdx.x + blockIdx.x * blockDim.x;
+            if (idx >= n)
+            {
+                return;
+            }
+
+            int stride2times = stride * 2;
+            if (idx % stride2times == 0)
+            {
+                // Pass the partial sum down to the left child and accumulate it on the right
+                int indexCorresponding = idx + stride - 1;
+                int indexCorresponding2times = idx + stride2times - 1;
+                int delta = idata[indexCorresponding];
+                idata[indexCorresponding] = idata[indexCorresponding2times];
+                idata[indexCorresponding2times] += delta;
+            }
+        }
+
+        // Tell whether a number is a power of two
+        bool isPowerOfTwo(int n)
+        {
+            if (n == 0)
+            {
+                return false;
+            }
+            else
+            {
+                return (n & (n - 1)) == 0;
+            }
+        }
 
         /**
          * Performs prefix-sum (aka scan) on idata, storing the result into odata.
          */
         void scan(int n, int *odata, const int *idata) {
+            int layer = ilog2ceil(n);
+            int elementNumber = 0;
+
+            // Round the problem size up to the next power of two
+            if (isPowerOfTwo(n))
+            {
+                elementNumber = n;
+            }
+            else
+            {
+                elementNumber = 1 << layer;
+            }
+            int *paddedInput = new int[elementNumber];
+            // Copy the input and pad the extra elements with 0, if any
+            for (int i = 0; i < elementNumber; ++i)
+            {
+                if (i >= n)
+                {
+                    paddedInput[i] = 0;
+                }
+                else
+                {
+                    paddedInput[i] = idata[i];
+                }
+            }
+
+            int bufferSize = elementNumber * sizeof(int);
+            int* paddedIDataDev = nullptr;
+
+            cudaMalloc(reinterpret_cast<void**>(&paddedIDataDev), bufferSize);
+            checkCUDAError("Malloc for padded input failed");
+            cudaMemcpy(reinterpret_cast<void*>(paddedIDataDev), paddedInput, bufferSize, cudaMemcpyHostToDevice);
+            checkCUDAError("Memcpy for padded input failed from host to device");
+
             timer().startGpuTimer();
             // TODO
+            // Size the launch for the padded array
+            dim3 gridLayout((elementNumber - 1 + BLOCKSIZE) / BLOCKSIZE);
+            dim3 blockLayout(BLOCKSIZE);
+
+            // Up-sweep (reduce) phase; the last level is skipped because the root
+            // is overwritten with 0 before the down-sweep anyway
+            for (int layerI = 0; layerI < layer - 1; ++layerI)
+            {
+                int stride = 1 << layerI;
+                upSweeping<<<gridLayout, blockLayout>>>(elementNumber, stride, paddedIDataDev);
+            }
+            // Replace the root with the identity before the down-sweep
+            int initialZero = 0;
+            cudaMemcpy(reinterpret_cast<void*>(paddedIDataDev + elementNumber - 1), &initialZero, sizeof(int), cudaMemcpyHostToDevice);
+            checkCUDAError("Memcpy for the initial 0 failed from host to device");
+            // Down-sweep phase
+            for (int layerI = layer - 1; layerI >= 0; --layerI)
+            {
+                int stride = 1 << layerI;
+                downSweeping<<<gridLayout, blockLayout>>>(elementNumber, stride, paddedIDataDev);
+            }
             timer().endGpuTimer();
+
+            // Only the first n results belong to the caller's buffer
+            cudaMemcpy(odata, reinterpret_cast<void*>(paddedIDataDev), n * sizeof(int), cudaMemcpyDeviceToHost);
+            checkCUDAError("Memcpy for scan result failed from device to host");
+            cudaFree(paddedIDataDev);
+            delete[] paddedInput;
         }
 
         /**
@@ -31,10 +131,85 @@ namespace StreamCompaction {
          * @returns The number of elements remaining after compaction.
          */
         int compact(int n, int *odata, const int *idata) {
-            timer().startGpuTimer();
-            // TODO
-            timer().endGpuTimer();
-            return -1;
+            int layer = ilog2ceil(n);
+            int elementNumber = 0;
+
+            // Round the problem size up to the next power of two
+            if (isPowerOfTwo(n))
+            {
+                elementNumber = n;
+            }
+            else
+            {
+                elementNumber = 1 << layer;
+            }
+            int *paddedInput = new int[elementNumber];
+            // Copy the input and pad the extra elements with 0, if any
+            for (int i = 0; i < elementNumber; ++i)
+            {
+                if (i >= n)
+                {
+                    paddedInput[i] = 0;
+                }
+                else
+                {
+                    paddedInput[i] = idata[i];
+                }
+            }
+
+            int bufferSize = elementNumber * sizeof(int);
+
+            int* paddedIDataDev = nullptr;
+            int* oDataDev = nullptr;
+            int* mDataDev = nullptr;
+            cudaMalloc(reinterpret_cast<void**>(&paddedIDataDev), bufferSize);
+            checkCUDAError("Malloc for padded input failed");
+            cudaMalloc(reinterpret_cast<void**>(&oDataDev), bufferSize);
+            checkCUDAError("Malloc for output failed");
+            cudaMalloc(reinterpret_cast<void**>(&mDataDev), bufferSize);
+            checkCUDAError("Malloc for intermediate failed");
+            cudaMemcpy(paddedIDataDev, paddedInput, bufferSize, cudaMemcpyHostToDevice);
+            checkCUDAError("Memcpy for padded input failed");
+
+            timer().startGpuTimer();
+            // TODO
+            // Size the launch for the padded array
+            dim3 gridLayout((elementNumber - 1 + BLOCKSIZE) / BLOCKSIZE);
+            dim3 blockLayout(BLOCKSIZE);
+
+            // Map every element to a keep/drop flag
+            StreamCompaction::Common::kernMapToBoolean<<<gridLayout, blockLayout>>>(elementNumber, mDataDev, paddedIDataDev);
+            // Remember whether the last element is kept; the exclusive scan below discards this information
+            int endingElement = 0;
+            cudaMemcpy(&endingElement, reinterpret_cast<void*>(mDataDev + elementNumber - 1), sizeof(int), cudaMemcpyDeviceToHost);
+
+            // Exclusive scan of the flags, in place: up-sweep, zero the root, down-sweep
+            for (int layerI = 0; layerI < layer - 1; ++layerI)
+            {
+                int stride = 1 << layerI;
+                upSweeping<<<gridLayout, blockLayout>>>(elementNumber, stride, mDataDev);
+            }
+            int initialZero = 0;
+            cudaMemcpy(reinterpret_cast<void*>(mDataDev + elementNumber - 1), &initialZero, sizeof(int), cudaMemcpyHostToDevice);
+            checkCUDAError("Memcpy for the initial 0 failed from host to device");
+            for (int layerI = layer - 1; layerI >= 0; --layerI)
+            {
+                int stride = 1 << layerI;
+                downSweeping<<<gridLayout, blockLayout>>>(elementNumber, stride, mDataDev);
+            }
+            int elements = 0;
+            cudaMemcpy(&elements, reinterpret_cast<void*>(mDataDev + elementNumber - 1), sizeof(int), cudaMemcpyDeviceToHost);
+            checkCUDAError("Memcpy for element count failed from device to host");
+            // The scanned flags serve both as the bools and as the scatter indices
+            StreamCompaction::Common::kernScatter<<<gridLayout, blockLayout>>>(elementNumber, oDataDev, paddedIDataDev, mDataDev, mDataDev);
+
+            timer().endGpuTimer();
+
+            // The scan is exclusive, so add the last element's flag to get the true count
+            if (endingElement == 1)
+            {
+                ++elements;
+            }
+            // Copy back only the surviving elements
+            cudaMemcpy(odata, reinterpret_cast<void*>(oDataDev), elements * sizeof(int), cudaMemcpyDeviceToHost);
+            cudaFree(paddedIDataDev);
+            cudaFree(oDataDev);
+            cudaFree(mDataDev);
+            delete[] paddedInput;
+
+            return elements;
         }
     }
 }
diff --git a/stream_compaction/naive.cu b/stream_compaction/naive.cu
index 9218f8e..a014ae5 100644
--- a/stream_compaction/naive.cu
+++ b/stream_compaction/naive.cu
@@ -5,21 +5,82 @@
 
 namespace StreamCompaction {
     namespace Naive {
-        using StreamCompaction::Common::PerformanceTimer;
-        PerformanceTimer& timer()
-        {
-            static PerformanceTimer timer;
-            return timer;
+        using StreamCompaction::Common::PerformanceTimer;
+        PerformanceTimer& timer()
+        {
+            static PerformanceTimer timer;
+            return timer;
         }
 
         // TODO: __global__
+        // One step of the naive (Hillis-Steele) scan; stride is 2^d at depth d
+        __global__ void streamCompactionNaive(int n, int stride, const int* idata, int* odata)
+        {
+            int idx = threadIdx.x + blockIdx.x * blockDim.x;
+            if (idx >= n)
+            {
+                return;
+            }
+
+            // This element has a partner `stride` slots to the left: add it in
+            if (idx >= stride)
+            {
+                odata[idx] = idata[idx - stride] + idata[idx];
+            }
+            // No partner to the left: just carry the value over
+            else
+            {
+                odata[idx] = idata[idx];
+            }
+        }
+
+        // Swap the ping-pong buffers between passes
+        void swapBuffer(int*& ptr1, int*& ptr2)
+        {
+            int* temp = ptr1;
+            ptr1 = ptr2;
+            ptr2 = temp;
+        }
 
         /**
          * Performs prefix-sum (aka scan) on idata, storing the result into odata.
          */
         void scan(int n, int *odata, const int *idata) {
-            timer().startGpuTimer();
-            // TODO
+            int* idataDev = nullptr;
+            int* odataDev = nullptr;
+            size_t heapSize = sizeof(int) * n;
+            cudaMalloc(reinterpret_cast<void**>(&idataDev), heapSize);
+            checkCUDAError("Malloc for input data failed");
+            cudaMalloc(reinterpret_cast<void**>(&odataDev), heapSize);
+            checkCUDAError("Malloc for output data failed");
+
+            cudaMemcpy(reinterpret_cast<void*>(idataDev), idata, heapSize, cudaMemcpyHostToDevice);
+            checkCUDAError("Memcpy input data host to device failed");
+            cudaMemcpy(reinterpret_cast<void*>(odataDev), odata, heapSize, cudaMemcpyHostToDevice);
+            checkCUDAError("Memcpy output data host to device failed");
+
+            dim3 gridLayout((n - 1 + BLOCKSIZE) / BLOCKSIZE);
+            dim3 blockLayout(BLOCKSIZE);
+
+            // Number of passes the scan needs
+            int layer = ilog2ceil(n);
+
+            timer().startGpuTimer();
+            // TODO
+            for (int layerI = 0; layerI < layer; ++layerI)
+            {
+                int strideWidth = 1 << layerI;
+                streamCompactionNaive<<<gridLayout, blockLayout>>>(n, strideWidth, idataDev, odataDev);
+
+                // Ping-pong: the freshly written buffer becomes the next pass's input
+                swapBuffer(idataDev, odataDev);
+            }
+            // Undo the final swap so odataDev points at the last result
+            swapBuffer(idataDev, odataDev);
             timer().endGpuTimer();
+
+            // Shift right by one and insert the identity to turn the inclusive scan into an exclusive one
+            cudaMemcpy(odata + 1, reinterpret_cast<void*>(odataDev), heapSize - sizeof(int), cudaMemcpyDeviceToHost);
+            checkCUDAError("Memcpy output data device to host failed");
+            odata[0] = 0;
+
+            cudaFree(idataDev);
+            cudaFree(odataDev);
         }
     }
 }
diff --git a/stream_compaction/thrust.cu b/stream_compaction/thrust.cu
index 36b732d..dc34bd5 100644
--- a/stream_compaction/thrust.cu
+++ b/stream_compaction/thrust.cu
@@ -8,21 +8,26 @@
 
 namespace StreamCompaction {
     namespace Thrust {
-        using StreamCompaction::Common::PerformanceTimer;
-        PerformanceTimer& timer()
-        {
-            static PerformanceTimer timer;
-            return timer;
+        using StreamCompaction::Common::PerformanceTimer;
+        PerformanceTimer& timer()
+        {
+            static PerformanceTimer timer;
+            return timer;
         }
 
         /**
          * Performs prefix-sum (aka scan) on idata, storing the result into odata.
          */
         void scan(int n, int *odata, const int *idata) {
+            thrust::device_vector<int> dv_in(idata, idata + n);
+            thrust::device_vector<int> dv_out(odata, odata + n);
+
             timer().startGpuTimer();
             // TODO use `thrust::exclusive_scan`
             // example: for device_vectors dv_in and dv_out:
-            // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
+            thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
             timer().endGpuTimer();
+
+            // Copy the device result back into the host output buffer
+            thrust::copy(dv_out.begin(), dv_out.end(), odata);
         }
     }
 }