diff --git a/README.md b/README.md
index 0e38ddb..d5d586f 100644
--- a/README.md
+++ b/README.md
@@ -3,12 +3,69 @@ CUDA Stream Compaction
 
 **University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 2**
 
-* (TODO) YOUR NAME HERE
-  * (TODO) [LinkedIn](), [personal website](), [twitter](), etc.
-* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab)
+* Dayu Li
+* Tested on: Windows 10, i7-10700K @ 3.80GHz, 16GB, GTX 2070 8150MB (personal laptop)
 
-### (TODO: Your README)
+## Features
 
-Include analysis, etc. (Remember, this is public, so don't put
-anything here that you don't want to share with the world.)
+##### CPU:
+
+- CPU Scan, CPU Compact
+
+##### GPU:
+
+- Naive Scan, Work-Efficient Scan, Thrust Scan
+- Work-Efficient Compact
+
+##### Extra:
+
+- Why is the CPU scan faster than the work-efficient scan?
+- Radix Sort
+
+![](img/1.png)
+
+## Performance Analysis
+
+### Block Size Optimization
+
+The graph below shows how the work-efficient compact runtime changes with the block size. As in the previous project, there is no obvious change in runtime as the block size varies.
+
+![](img/2.png)
+
+### Runtime Comparison
+
+The graphs below compare the runtime of the different scan implementations across array sizes.
+
+![](img/3.png)
+
+- When the array size is small, the CPU scan performs best. This is due to the overhead of launching kernels and making CUDA API calls; as expected, the parallel version is slower than a plain loop when the task is small.
+- When the array size is large, the CPU scan falls behind and the GPU implementations win, with thrust likely the fastest, presumably because it uses shared memory instead of global memory.
+
+### Extra Credit
+
+#### Why is My GPU Approach So Slow?
+
+One reason the work-efficient scan is slower than expected is that too many threads are created that never do useful work. These idle threads matter more and more as the array size grows.
+
+To resolve this, I changed the number of threads launched in each iteration of the work-efficient scan/compact from 2^d to 2^(d-i), which reduces the number of threads created without changing any of the results.
+
+- Before: number of threads = 2^d
+- After: number of threads = 2^(d-i)
+
+After this optimization, the work-efficient scan is significantly faster (tested with block size 128):
+
+| Array size | Before | After |
+|--|--|--|
+| 2^8 | 0.2152 ms | 0.03081 ms |
+| 2^16 | 4.31 ms | 1.062 ms |
+
+#### Radix sort (not finished)
+
+The implementation follows the course slides: each pass splits the keys on the current bit using a scan, as sketched below.
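+
+A minimal CPU sketch of one split pass is shown here for reference only (`splitPass` and the helper arrays are illustrative, not part of this code base):
+
+```
+#include <vector>
+
+// One split pass of LSD radix sort on bit `bit`.
+// e[i] = 1 where the bit is 0; f = exclusive_scan(e) gives the output slot of
+// every "false" key; "true" keys go after totalFalses (t[i] = i - f[i] + totalFalses).
+void splitPass(std::vector<int>& keys, int bit) {
+    if (keys.empty()) return;
+    int n = (int)keys.size();
+    std::vector<int> e(n), f(n), out(n);
+    for (int i = 0; i < n; ++i) e[i] = ((keys[i] >> bit) & 1) ? 0 : 1;
+    f[0] = 0;                                         // exclusive scan of e
+    for (int i = 1; i < n; ++i) f[i] = f[i - 1] + e[i - 1];
+    int totalFalses = f[n - 1] + e[n - 1];
+    for (int i = 0; i < n; ++i) {
+        int d = e[i] ? f[i] : (i - f[i] + totalFalses);
+        out[d] = keys[i];                             // scatter into a separate buffer
+    }
+    keys.swap(out);
+}
+```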
+
+Uncommenting this block in main.cpp runs the test:
+
+```
+//printDesc("radix sort test");
+//genArray(RADIXSIZE, radix, 32);
+//zeroArray(RADIXSIZE, radix_sorted);
+//StreamCompaction::Efficient::radixSort(RADIXSIZE, 6, radix, radix_sorted);
+//printArray(RADIXSIZE, radix, true);
+//printArray(RADIXSIZE, radix_sorted, true);
+```
diff --git a/img/1.png b/img/1.png
new file mode 100644
index 0000000..1ba95bb
Binary files /dev/null and b/img/1.png differ
diff --git a/img/2.png b/img/2.png
new file mode 100644
index 0000000..55f3115
Binary files /dev/null and b/img/2.png differ
diff --git a/img/3.png b/img/3.png
new file mode 100644
index 0000000..31227f5
Binary files /dev/null and b/img/3.png differ
diff --git a/src/main.cpp b/src/main.cpp
index 896ac2b..6cbfe1d 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -13,11 +13,14 @@
 #include
 #include "testing_helpers.hpp"
 
-const int SIZE = 1 << 8; // feel free to change the size of array
+const int SIZE = 1 << 4; // feel free to change the size of array
 const int NPOT = SIZE - 3; // Non-Power-Of-Two
+const int RADIXSIZE = 20;
 int *a = new int[SIZE];
 int *b = new int[SIZE];
 int *c = new int[SIZE];
+int *radix = new int[RADIXSIZE];
+int *radix_sorted = new int[RADIXSIZE];
 
 int main(int argc, char* argv[]) {
     // Scan tests
@@ -147,8 +150,17 @@ int main(int argc, char* argv[]) {
     //printArray(count, c, true);
     printCmpLenResult(count, expectedNPOT, b, c);
 
+    //printDesc("radix sort test");
+    //genArray(RADIXSIZE, radix, 32);
+    //zeroArray(RADIXSIZE, radix_sorted);
+    //StreamCompaction::Efficient::radixSort(RADIXSIZE, 6, radix, radix_sorted);
+    //printArray(RADIXSIZE, radix, true);
+    //printArray(RADIXSIZE, radix_sorted, true);
+
     system("pause"); // stop Win32 console from closing on exit
     delete[] a;
     delete[] b;
     delete[] c;
+    delete[] radix;
+    delete[] radix_sorted;
 }
diff --git a/stream_compaction/common.cu b/stream_compaction/common.cu
index 2ed6d63..6267abf 100644
--- a/stream_compaction/common.cu
+++ b/stream_compaction/common.cu
@@ -24,6 +24,11 @@ namespace StreamCompaction {
          */
         __global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
             // TODO
+            int index = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (index >= n) {
+                return;
+            }
+            bools[index] = (idata[index]) ? 1 : 0;
         }
 
         /**
@@ -33,6 +38,13 @@ namespace StreamCompaction {
         __global__ void kernScatter(int n, int *odata,
                 const int *idata, const int *bools, const int *indices) {
             // TODO
+            int index = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (index >= n) {
+                return;
+            }
+            if (bools[index]) {
+                odata[indices[index]] = idata[index];
+            }
         }
 
     }
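// Worked example of the scan + scatter compaction that kernMapToBoolean and
// kernScatter implement together (the input values are assumed, for illustration only):
//   idata   = [3, 0, 7, 0, 4]
//   bools   = [1, 0, 1, 0, 1]   // kernMapToBoolean: 1 wherever idata[i] != 0
//   indices = [0, 1, 1, 2, 2]   // exclusive scan of bools
//   scatter : odata[indices[i]] = idata[i] wherever bools[i] == 1
//   odata   = [3, 7, 4]         // count = indices[4] + 1 = 3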
diff --git a/stream_compaction/cpu.cu b/stream_compaction/cpu.cu
index 719fa11..79222a5 100644
--- a/stream_compaction/cpu.cu
+++ b/stream_compaction/cpu.cu
@@ -12,6 +12,25 @@ namespace StreamCompaction {
             return timer;
         }
 
+        void CPUScan(const int* idata, int* odata, int n)
+        {
+            odata[0] = 0;
+            for (int i = 1; i < n; ++i) {
+                odata[i] = odata[i - 1] + idata[i - 1];
+            }
+        }
+
+        int CPUCompactWithoutScan(int n, int* odata, const int* idata)
+        {
+            int count = 0;
+            for (int i = 0; i < n; ++i) {
+                if (idata[i] != 0) {
+                    odata[count] = idata[i];
+                    count++;
+                }
+            }
+            return count;
+        }
         /**
          * CPU scan (prefix sum).
          * For performance analysis, this is supposed to be a simple for loop.
@@ -20,6 +39,7 @@ namespace StreamCompaction {
          */
         void scan(int n, int *odata, const int *idata) {
             timer().startCpuTimer();
             // TODO
+            CPUScan(idata, odata, n);
             timer().endCpuTimer();
         }
 
@@ -31,8 +51,9 @@ namespace StreamCompaction {
         int compactWithoutScan(int n, int *odata, const int *idata) {
             timer().startCpuTimer();
             // TODO
+            int count = CPUCompactWithoutScan(n, odata, idata);
             timer().endCpuTimer();
-            return -1;
+            return count;
         }
 
         /**
@@ -40,11 +61,32 @@ namespace StreamCompaction {
          *
          * @returns the number of elements remaining after compaction.
          */
-        int compactWithScan(int n, int *odata, const int *idata) {
+        int compactWithScan(int n, int* odata, const int* idata) {
+
+            int* boolArray = (int*)malloc(n * sizeof(int));
+            int* indexArray = (int*)malloc(n * sizeof(int));
+
             timer().startCpuTimer();
             // TODO
+            // map to booleans
+            for (int i = 0; i < n; ++i) {
+                boolArray[i] = (idata[i]) ? 1 : 0;
+            }
+
+            // scan
+            CPUScan(boolArray, indexArray, n);
+
+            // scatter
+            for (int i = 0; i < n; ++i) {
+                if (boolArray[i]) {
+                    odata[indexArray[i]] = idata[i];
+                }
+            }
+            int count = boolArray[n - 1] ? indexArray[n - 1] + 1 : indexArray[n - 1];
             timer().endCpuTimer();
-            return -1;
+
+            free(boolArray);
+            free(indexArray);
+            return count;
         }
     }
 }
diff --git a/stream_compaction/efficient.cu b/stream_compaction/efficient.cu
index 2db346e..36cff9b 100644
--- a/stream_compaction/efficient.cu
+++ b/stream_compaction/efficient.cu
@@ -3,6 +3,7 @@
 #include "common.h"
 #include "efficient.h"
 
+#define BLOCKSIZE 128
 namespace StreamCompaction {
     namespace Efficient {
         using StreamCompaction::Common::PerformanceTimer;
@@ -11,14 +12,99 @@ namespace StreamCompaction {
            static PerformanceTimer timer;
            return timer;
         }
+
+        // Up-sweep (reduce) step: each thread adds the last element of the left
+        // subtree into the last element of its parent's range at depth d.
+        __global__ void kernUpSweep(int n, int d, int* idata) {
+            int index = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (index >= n) {
+                return;
+            }
+            int offset = 1 << d;           // 2^d
+            int offsetDouble = offset * 2; // 2^(d+1)
+            int indexRemap = index * offsetDouble;
+            idata[indexRemap + offsetDouble - 1] += idata[indexRemap + offset - 1];
+        }
+
+        // Down-sweep step: the left child takes the node's current value and the
+        // right child takes node value + old left child.
+        __global__ void kernDownSweep(int n, int d, int* idata) {
+            int index = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (index >= n) {
+                return;
+            }
+            int offset = 1 << d;           // 2^d
+            int offsetDouble = offset * 2; // 2^(d+1)
+            int indexRemap = index * offsetDouble;
+            int temp = idata[indexRemap + offset - 1];
+            idata[indexRemap + offset - 1] = idata[indexRemap + offsetDouble - 1];
+            idata[indexRemap + offsetDouble - 1] += temp;
+        }
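// Worked example of the two sweeps on an assumed 8-element input (scan() below
// launches one kernel per level, halving the number of threads at each level):
//   input:                 [1, 2, 3, 4, 5, 6, 7, 8]
//   up-sweep d=0:          [1, 3, 3, 7, 5, 11, 7, 15]
//   up-sweep d=1:          [1, 3, 3, 10, 5, 11, 7, 26]
//   up-sweep d=2:          [1, 3, 3, 10, 5, 11, 7, 36]
//   zero the last element: [1, 3, 3, 10, 5, 11, 7, 0]
//   down-sweep d=2:        [1, 3, 3, 0, 5, 11, 7, 10]
//   down-sweep d=1:        [1, 0, 3, 3, 5, 10, 7, 21]
//   down-sweep d=0:        [0, 1, 3, 6, 10, 15, 21, 28]   <- exclusive scan of the input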
+
+        // For the radix sort extra credit
+
+        __global__ void kernRadixBarray(int n, int bitNum, const int* idata, int* bArray, int* eArray)
+        {
+            int index = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (index >= n) {
+                return;
+            }
+            bArray[index] = (idata[index] >> bitNum & 1) ? 1 : 0;
+            eArray[index] = (idata[index] >> bitNum & 1) ? 0 : 1;
+        }
+
+        __global__ void kernRadixTarray(int n, const int* fArray, int* tArray, int totalFalses)
+        {
+            int index = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (index >= n) {
+                return;
+            }
+            tArray[index] = index - fArray[index] + totalFalses;
+        }
+
+        __global__ void kernRadixDarray(int n, const int* bArray, const int* fArray, const int* tArray, int* dArray)
+        {
+            int index = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (index >= n) {
+                return;
+            }
+            dArray[index] = bArray[index] ? tArray[index] : fArray[index];
+        }
+
+        __global__ void kernRadixRemap(int n, const int* dArray, const int* idata, int* odata)
+        {
+            int index = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (index >= n) {
+                return;
+            }
+            // Scatter into a separate buffer; writing back into idata here would
+            // race with other threads still reading it.
+            odata[dArray[index]] = idata[index];
+        }
 
         /**
          * Performs prefix-sum (aka scan) on idata, storing the result into odata.
          */
         void scan(int n, int *odata, const int *idata) {
+            int size = 1 << ilog2ceil(n);
+            int* dev_idata;
+            cudaMalloc((void**)&dev_idata, size * sizeof(int));
+            cudaMemset(dev_idata, 0, size * sizeof(int)); // zero the padding for non-power-of-two n
+            cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
+
             timer().startGpuTimer();
+            dim3 blockSize(BLOCKSIZE);
+            dim3 gridSize((size + BLOCKSIZE - 1) / BLOCKSIZE);
             // TODO
+            int d = ilog2ceil(n) - 1;
+            // Up-sweep: level i only needs 2^(d - i) threads.
+            for (int i = 0; i <= d; ++i) {
+                int threadsNum = 1 << (d - i);
+                gridSize = dim3((threadsNum + BLOCKSIZE - 1) / BLOCKSIZE);
+                kernUpSweep<<<gridSize, blockSize>>>(threadsNum, i, dev_idata);
+            }
+            cudaMemset((void*)&(dev_idata[size - 1]), 0, sizeof(int));
+
+            // Down-sweep
+            for (int j = d; j >= 0; --j) {
+                int threadsNum = 1 << (d - j);
+                gridSize = dim3((threadsNum + BLOCKSIZE - 1) / BLOCKSIZE);
+                kernDownSweep<<<gridSize, blockSize>>>(threadsNum, j, dev_idata);
+            }
             timer().endGpuTimer();
+
+            cudaMemcpy(odata, dev_idata, n * sizeof(int), cudaMemcpyDeviceToHost);
+            checkCUDAError("cudaMemcpy odata failed!");
+            cudaFree(dev_idata);
+            checkCUDAError("cudaFree(dev_idata) failed!");
         }
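// Launch-size illustration for the optimization described in the README
// (numbers assume n = 2^16 and BLOCKSIZE = 128):
//   before: every level launches 2^15 threads  -> 256 blocks per level
//   after:  level i launches 2^(15 - i) threads -> 256 blocks at i = 0,
//           dropping to a single block for every level i >= 8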
 
         /**
@@ -31,10 +117,139 @@ namespace StreamCompaction {
          * @returns The number of elements remaining after compaction.
          */
         int compact(int n, int *odata, const int *idata) {
+
+            int pow2d = 1 << ilog2ceil(n);
+            int* boolArray;
+            int* indexArray;
+            int* dev_idata;
+            int* dev_odata;
+            int* dev_boolArray;
+            int* dev_indexArray;
+
+            cudaMalloc((void**)&dev_idata, n * sizeof(int));
+            cudaMalloc((void**)&dev_odata, n * sizeof(int));
+            cudaMalloc((void**)&dev_boolArray, n * sizeof(int));
+            cudaMalloc((void**)&dev_indexArray, pow2d * sizeof(int)); // This takes more memory space.
+            cudaMemset(dev_indexArray, 0, pow2d * sizeof(int));       // zero the padding for non-power-of-two n
+
+            boolArray = (int*)malloc(n * sizeof(int));
+            indexArray = (int*)malloc(n * sizeof(int));
+
+            int gridSize = (n + BLOCKSIZE - 1) / BLOCKSIZE;
+            int gridSize2 = (pow2d + BLOCKSIZE - 1) / BLOCKSIZE;
+
+            // copy memory to device
+            cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
 
             timer().startGpuTimer();
             // TODO
+            // Map the boolean array
+            StreamCompaction::Common::kernMapToBoolean<<<gridSize, BLOCKSIZE>>>(n, dev_boolArray, dev_idata);
+
+            // Do an exclusive scan
+            cudaMemcpy(dev_indexArray, dev_boolArray, n * sizeof(int), cudaMemcpyDeviceToDevice);
+
+            int d = ilog2ceil(n) - 1;
+            for (int i = 0; i <= d; ++i) {
+                int threadsNum = 1 << (d - i);
+                gridSize2 = (threadsNum + BLOCKSIZE - 1) / BLOCKSIZE;
+                kernUpSweep<<<gridSize2, BLOCKSIZE>>>(threadsNum, i, dev_indexArray);
+            }
+            cudaMemset((void*)&(dev_indexArray[pow2d - 1]), 0, sizeof(int));
+
+            for (int j = d; j >= 0; --j) {
+                int threadsNum = 1 << (d - j);
+                gridSize2 = (threadsNum + BLOCKSIZE - 1) / BLOCKSIZE;
+                kernDownSweep<<<gridSize2, BLOCKSIZE>>>(threadsNum, j, dev_indexArray);
+            }
+
+            // Scatter
+            StreamCompaction::Common::kernScatter<<<gridSize, BLOCKSIZE>>>(n, dev_odata, dev_idata, dev_boolArray, dev_indexArray);
+
             timer().endGpuTimer();
-            return -1;
+
+            // Copy the results back to the host
+            cudaMemcpy(indexArray, dev_indexArray, n * sizeof(int), cudaMemcpyDeviceToHost);
+            checkCUDAError("get indexArray failed!");
+            cudaMemcpy(boolArray, dev_boolArray, n * sizeof(int), cudaMemcpyDeviceToHost);
+            checkCUDAError("get boolArray failed!");
+            cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost);
+            checkCUDAError("get odata failed!");
+
+            int count = boolArray[n - 1] ? indexArray[n - 1] + 1 : indexArray[n - 1];
+
+            // Free memory
+            cudaFree(dev_idata);
+            checkCUDAError("cudaFree(dev_idata) failed!");
+            cudaFree(dev_odata);
+            checkCUDAError("cudaFree(dev_odata) failed!");
+            cudaFree(dev_boolArray);
+            checkCUDAError("cudaFree(dev_boolArray) failed!");
+            cudaFree(dev_indexArray);
+            checkCUDAError("cudaFree(dev_indexArray) failed!");
+
+            free(boolArray);
+            free(indexArray);
+
+            return count;
         }
+
+        void radixSort(int n, int bitNum, int* odata, int* idata)
+        {
+            int* dev_idata;
+            int* dev_odata;
+            int* dev_bArray;
+            int* dev_eArray;
+            int* dev_fArray;
+            int* dev_tArray;
+            int* dev_dArray;
+
+            int* fArray;
+            int* eArray;
+
+            // Malloc
+            cudaMalloc((void**)&dev_idata, n * sizeof(int));
+            cudaMalloc((void**)&dev_odata, n * sizeof(int));
+            cudaMalloc((void**)&dev_bArray, n * sizeof(int));
+            cudaMalloc((void**)&dev_eArray, n * sizeof(int));
+            cudaMalloc((void**)&dev_fArray, n * sizeof(int));
+            cudaMalloc((void**)&dev_tArray, n * sizeof(int));
+            cudaMalloc((void**)&dev_dArray, n * sizeof(int));
+
+            fArray = (int*)malloc(n * sizeof(int));
+            eArray = (int*)malloc(n * sizeof(int));
+
+            dim3 blockSize(BLOCKSIZE);
+            dim3 gridSize((n + BLOCKSIZE - 1) / BLOCKSIZE);
+
+            // Copy data from host to device
+            cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
+            checkCUDAError("cudaMemcpy dev_idata failed!");
+            int falseCount = 0;
+
+            for (int i = 0; i < bitNum; ++i)
+            {
+                // Get the B-array and E-array for the current bit
+                kernRadixBarray<<<gridSize, blockSize>>>(n, i, dev_idata, dev_bArray, dev_eArray);
+
+                // Get the F-array: scan() expects host pointers, so stage the E-array
+                // on the host and copy the scanned result back to the device.
+                cudaMemcpy(eArray, dev_eArray, n * sizeof(int), cudaMemcpyDeviceToHost);
+                StreamCompaction::Efficient::scan(n, fArray, eArray);
+                cudaMemcpy(dev_fArray, fArray, n * sizeof(int), cudaMemcpyHostToDevice);
+                falseCount = fArray[n - 1] + eArray[n - 1];
+
+                kernRadixTarray<<<gridSize, blockSize>>>(n, dev_fArray, dev_tArray, falseCount);
+                kernRadixDarray<<<gridSize, blockSize>>>(n, dev_bArray, dev_fArray, dev_tArray, dev_dArray);
+
+                // Scatter into dev_odata, then copy back so the next pass reads the split order
+                kernRadixRemap<<<gridSize, blockSize>>>(n, dev_dArray, dev_idata, dev_odata);
+                cudaMemcpy(dev_idata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToDevice);
+            }
+
+            cudaMemcpy(odata, dev_idata, n * sizeof(int), cudaMemcpyDeviceToHost); // get the result
+            checkCUDAError("get odata failed!\n");
+
+            cudaFree(dev_idata);
+            cudaFree(dev_odata);
+            cudaFree(dev_bArray);
+            cudaFree(dev_eArray);
+            cudaFree(dev_fArray);
+            cudaFree(dev_tArray);
+            cudaFree(dev_dArray);
+
+            free(fArray);
+            free(eArray);
+        }
     }
 }
diff --git a/stream_compaction/efficient.h b/stream_compaction/efficient.h
index 803cb4f..6f57a8c 100644
--- a/stream_compaction/efficient.h
+++ b/stream_compaction/efficient.h
@@ -9,5 +9,7 @@ namespace StreamCompaction {
         void scan(int n, int *odata, const int *idata);
 
         int compact(int n, int *odata, const int *idata);
+
+        void radixSort(int n, int bitNum, int* odata, int* idata);
     }
 }
diff --git a/stream_compaction/naive.cu b/stream_compaction/naive.cu
index 4308876..3a6488b 100644
--- a/stream_compaction/naive.cu
+++ b/stream_compaction/naive.cu
@@ -3,6 +3,8 @@
 #include "common.h"
 #include "naive.h"
 
+#define BLOCKSIZE 128
+
 namespace StreamCompaction {
     namespace Naive {
         using StreamCompaction::Common::PerformanceTimer;
@@ -12,14 +14,65 @@ namespace StreamCompaction {
             return timer;
         }
         // TODO: __global__
+
+        // One step of the naive (Hillis-Steele) scan: each element beyond `lowbound`
+        // adds in the element `lowbound` positions to its left.
+        __global__ void kernScanNaive(int n, int* odata, int* idata, int lowbound)
+        {
+            int index = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (index >= n) {
+                return;
+            }
+            if (index >= lowbound) {
+                odata[index] = idata[index - lowbound] + idata[index];
+            }
+            else {
+                odata[index] = idata[index];
+            }
+        }
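// The naive scan ping-pongs two buffers: pass i reads idata[index - offset] while
// writing odata[index]; doing this in place would let one thread overwrite a value
// another thread still needs to read. Trace on an assumed input:
//   [1, 2, 3, 4]  --offset 1-->  [1, 3, 5, 7]  --offset 2-->  [1, 3, 6, 10]
// That is the inclusive scan; the right shift in scan() below turns it into the
// exclusive result [0, 1, 3, 6].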
 
         /**
          * Performs prefix-sum (aka scan) on idata, storing the result into odata.
          */
         void scan(int n, int *odata, const int *idata) {
+            int* dev_idata;
+            int* dev_odata;
+            cudaMalloc((void**)&dev_idata, n * sizeof(int));
+            checkCUDAError("cudaMalloc dev_idata failed!");
+            cudaMalloc((void**)&dev_odata, n * sizeof(int));
+            checkCUDAError("cudaMalloc dev_odata failed!");
+
+            cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
+            checkCUDAErrorFn("cudaMemcpy (host to device) failed!");
+
+            int d = ilog2ceil(n); // number of passes: ceil(log2 n)
+            dim3 fullBlocksPerGrid((n + BLOCKSIZE - 1) / BLOCKSIZE);
             timer().startGpuTimer();
             // TODO
+            for (int i = 1; i <= d; i++) {
+                if (i % 2) {
+                    kernScanNaive<<<fullBlocksPerGrid, BLOCKSIZE>>>(n, dev_odata, dev_idata, 1 << (i - 1));
+                }
+                else {
+                    kernScanNaive<<<fullBlocksPerGrid, BLOCKSIZE>>>(n, dev_idata, dev_odata, 1 << (i - 1));
+                }
+            }
             timer().endGpuTimer();
+
+            // Decide which buffer holds the final (inclusive) scan
+            int* source;
+            if (d % 2) {
+                source = dev_odata;
+            }
+            else {
+                source = dev_idata;
+            }
+            // To turn the inclusive scan into an exclusive scan, shift right by one element.
+            cudaMemcpy(odata + 1, source, (n - 1) * sizeof(int), cudaMemcpyDeviceToHost);
+            checkCUDAError("get odata failed!");
+            odata[0] = 0;
+
+            // free
+            cudaFree(dev_idata);
+            cudaFree(dev_odata);
         }
     }
 }
diff --git a/stream_compaction/radix.h b/stream_compaction/radix.h
new file mode 100644
index 0000000..ecdb16f
--- /dev/null
+++ b/stream_compaction/radix.h
@@ -0,0 +1,12 @@
+#pragma once
+
+#include "common.h"
+#include "efficient.h"
+
+namespace StreamCompaction {
+    namespace Radix {
+        StreamCompaction::Common::PerformanceTimer& timer();
+
+        void radixSort(int n, int bits_num, int* odata, const int* idata);
+    }
+}
\ No newline at end of file
diff --git a/stream_compaction/thrust.cu b/stream_compaction/thrust.cu
index 1def45e..183607c 100644
--- a/stream_compaction/thrust.cu
+++ b/stream_compaction/thrust.cu
@@ -18,11 +18,18 @@ namespace StreamCompaction {
          * Performs prefix-sum (aka scan) on idata, storing the result into odata.
          */
         void scan(int n, int *odata, const int *idata) {
+            thrust::host_vector<int> host_idata(idata, idata + n);
+            thrust::device_vector<int> dev_idata = host_idata;
+            thrust::device_vector<int> dev_odata(n);
+
             timer().startGpuTimer();
             // TODO use `thrust::exclusive_scan`
             // example: for device_vectors dv_in and dv_out:
             // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
+            thrust::exclusive_scan(dev_idata.begin(), dev_idata.end(), dev_odata.begin());
             timer().endGpuTimer();
+
+            thrust::copy(dev_odata.begin(), dev_odata.end(), odata);
         }
     }
 }