diff --git a/README.md b/README.md
index 0e38ddb..a1b40da 100644
--- a/README.md
+++ b/README.md
@@ -3,12 +3,84 @@ CUDA Stream Compaction
 
 **University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 2**
 
-* (TODO) YOUR NAME HERE
-  * (TODO) [LinkedIn](), [personal website](), [twitter](), etc.
-* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab)
+* Hanyu Liu
+  * [personal website](http://liuhanyu.net/)
+* Tested on: Windows 10, Intel(R) Core(TM) i7-7700HQ CPU @ 2.80GHz, 16.0GB, GeForce GTX 1050 (Personal)
+
+### Summary of Project
+
+In this project, I implemented stream compaction, which removes `0`s from an array of `int`s, on both the CPU and the GPU in CUDA. Stream compaction relies on the scan (prefix sum) algorithm, and in this project I implemented four different scan methods: 1) scan on the CPU, 2) naive, 3) work-efficient, and 4) using Thrust. GPU stream compaction also requires additional kernels to generate a boolean mask and to scatter the kept elements, which I implemented as well. With the help of these functions, I was able to implement stream compaction on both the GPU and the CPU.
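+
+As a concrete illustration (a small hand-worked example for exposition, not program output), compacting an array proceeds in three steps:
+
+```
+input:          [ 3 0 1 0 4 ]
+boolean mask:   [ 1 0 1 0 1 ]
+exclusive scan: [ 0 1 1 2 2 ]   <- output index of each kept element
+scatter:        [ 3 1 4 ]
+```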
+
+Stream compaction is widely used, and it will be used to accelerate my future path tracer project.
+
+### Performance Analysis
+
+![](img/performance.png)
+
+1. For the Thrust implementation, the runtime is significantly lower than that of the other implementations. This is possibly due to less memory copying, since it alters the data in place without the need for extra buffers.
+
+2. Here, we see that the naive and work-efficient scans both take much longer than the CPU scan at large array sizes, even though we are processing the array in parallel on the GPU. In fact, the work-efficient scan takes much longer than the naive scan. Both GPU implementations likely suffer from kernel-launch overhead: the naive scan launches log2(n) kernels, while the work-efficient scan launches roughly 2*log2(n) (one up-sweep and one down-sweep pass per level) over a larger, padded array, and at every level it launches a thread per element even though only a fraction of those threads do any work. In terms of raw work, the naive scan performs O(n log n) additions while the work-efficient scan performs only O(n); the rest of the performance depends on memory allocation, caching, and overhead, in which case the CPU wins out.
+
+3. ```
+   ****************
+   ** SCAN TESTS **
+   ****************
+   [ 3 5 18 11 7 47 39 47 30 45 5 9 40 ... 23 0 ]
+   ==== cpu scan, power-of-two ====
+   elapsed time: 0.0023ms (std::chrono Measured)
+   [ 0 3 8 26 37 44 91 130 177 207 252 257 266 ... 24162 24185 ]
+   ==== cpu scan, non-power-of-two ====
+   elapsed time: 0.0051ms (std::chrono Measured)
+   [ 0 3 8 26 37 44 91 130 177 207 252 257 266 ... 24142 24149 ]
+   passed
+   ==== naive scan, power-of-two ====
+   elapsed time: 0.023552ms (CUDA Measured)
+   passed
+   ==== naive scan, non-power-of-two ====
+   elapsed time: 0.022528ms (CUDA Measured)
+   passed
+   ==== work-efficient scan, power-of-two ====
+   elapsed time: 0.1024ms (CUDA Measured)
+   passed
+   ==== work-efficient scan, non-power-of-two ====
+   elapsed time: 0.132096ms (CUDA Measured)
+   passed
+   ==== thrust scan, power-of-two ====
+   elapsed time: 0.079872ms (CUDA Measured)
+   passed
+   ==== thrust scan, non-power-of-two ====
+   elapsed time: 0.090336ms (CUDA Measured)
+   passed
+
+   *****************************
+   ** STREAM COMPACTION TESTS **
+   *****************************
+   [ 3 3 2 1 3 3 3 3 0 1 3 3 2 ... 1 0 ]
+   ==== cpu compact without scan, power-of-two ====
+   elapsed time: 0.0073ms (std::chrono Measured)
+   [ 3 3 2 1 3 3 3 3 1 3 3 2 2 ... 2 1 ]
+   passed
+   ==== cpu compact without scan, non-power-of-two ====
+   elapsed time: 0.0074ms (std::chrono Measured)
+   [ 3 3 2 1 3 3 3 3 1 3 3 2 2 ... 3 1 ]
+   passed
+   ==== cpu compact with scan ====
+   elapsed time: 0.0363ms (std::chrono Measured)
+   [ 3 3 2 1 3 3 3 3 1 3 3 2 2 ... 2 1 ]
+   passed
+   ==== work-efficient compact, power-of-two ====
+   elapsed time: 0.192512ms (CUDA Measured)
+   passed
+   ==== work-efficient compact, non-power-of-two ====
+   elapsed time: 0.171008ms (CUDA Measured)
+   passed
+   Press any key to continue . . .
+   ```
 
-### (TODO: Your README)
-Include analysis, etc. (Remember, this is public, so don't put
-anything here that you don't want to share with the world.)
diff --git a/img/performance.png b/img/performance.png
new file mode 100644
index 0000000..b2f3a98
Binary files /dev/null and b/img/performance.png differ
diff --git a/src/main.cpp b/src/main.cpp
index 896ac2b..ba467c6 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -13,7 +13,7 @@
 #include <stream_compaction/thrust.h>
 #include "testing_helpers.hpp"
 
-const int SIZE = 1 << 8; // feel free to change the size of array
+const int SIZE = 1000; // feel free to change the size of array
 const int NPOT = SIZE - 3; // Non-Power-Of-Two
 int *a = new int[SIZE];
 int *b = new int[SIZE];
diff --git a/stream_compaction/common.cu b/stream_compaction/common.cu
index 2ed6d63..cc36929 100644
--- a/stream_compaction/common.cu
+++ b/stream_compaction/common.cu
@@ -23,7 +23,15 @@ namespace StreamCompaction {
          * which map to 0 will be removed, and elements which map to 1 will be kept.
          */
         __global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
-            // TODO
+            int k = (blockIdx.x * blockDim.x) + threadIdx.x;
+            if (k >= n) return;
+
+            if (idata[k] != 0) {
+                bools[k] = 1;
+            }
+            else {
+                bools[k] = 0;
+            }
         }
 
         /**
@@ -32,7 +40,12 @@ namespace StreamCompaction {
          */
         __global__ void kernScatter(int n, int *odata,
                 const int *idata, const int *bools, const int *indices) {
-            // TODO
+            int k = (blockIdx.x * blockDim.x) + threadIdx.x;
+            if (k >= n) return;
+
+            if (bools[k] == 1) {
+                odata[indices[k]] = idata[k];
+            }
         }
 
     }
}
diff --git a/stream_compaction/cpu.cu b/stream_compaction/cpu.cu
index 719fa11..645af9b 100644
--- a/stream_compaction/cpu.cu
+++ b/stream_compaction/cpu.cu
@@ -19,7 +19,11 @@ namespace StreamCompaction {
          */
         void scan(int n, int *odata, const int *idata) {
             timer().startCpuTimer();
-            // TODO
+            // compute an exclusive prefix sum
+            odata[0] = 0;
+            for (int i = 1; i < n; i++) {
+                odata[i] = odata[i - 1] + idata[i - 1];
+            }
             timer().endCpuTimer();
         }
 
@@ -30,9 +34,28 @@ namespace StreamCompaction {
          */
         int compactWithoutScan(int n, int *odata, const int *idata) {
             timer().startCpuTimer();
-            // TODO
+            int elements_remaining = 0;
+            for (int i = 0; i < n; i++) {
+                if (idata[i] != 0) {
+                    odata[elements_remaining] = idata[i];
+                    elements_remaining++;
+                }
+            }
             timer().endCpuTimer();
-            return -1;
+            return elements_remaining;
+        }
+
+        /*
+         * Helper function, because restarting the timer caused issues.
+         * Same as the scan function above, just without the timer.
+         * From Piazza @110
+         */
+        void cpu_scan(int n, int* odata, const int* idata) {
+            // compute an exclusive prefix sum
+            odata[0] = 0;
+            for (int i = 1; i < n; i++) {
+                odata[i] = odata[i - 1] + idata[i - 1];
+            }
         }
 
         /**
@@ -42,9 +65,33 @@ namespace StreamCompaction {
          */
         int compactWithScan(int n, int *odata, const int *idata) {
             timer().startCpuTimer();
-            // TODO
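+            // Hand-worked illustration of the three steps below (exposition only):
+            //   idata      = [ 3 0 1 0 4 ]
+            //   temp_array = [ 1 0 1 0 1 ]   (map to booleans)
+            //   scanned    = [ 0 1 1 2 2 ]   (exclusive scan)
+            //   odata      = [ 3 1 4 ]       (scatter kept elements)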
+            // fill temp array with 0 if idata is 0, or 1 otherwise
+            int* temp_array = new int[n];
+            for (int i = 0; i < n; i++) {
+                if (idata[i] == 0) {
+                    temp_array[i] = 0;
+                }
+                else temp_array[i] = 1;
+            }
+
+            // run exclusive scan on the temporary array
+            int* scanned_array = new int[n];
+            cpu_scan(n, scanned_array, temp_array);
+
+            // scatter
+            for (int j = 0; j < n; j++) {
+                if (temp_array[j] == 1) {
+                    // write element
+                    odata[scanned_array[j]] = idata[j];
+                }
+            }
+
+            // the scan is exclusive, so add the last boolean to get the full count
+            int result = scanned_array[n - 1] + temp_array[n - 1];
             timer().endCpuTimer();
-            return -1;
+            // cleanup
+            delete[] temp_array;
+            delete[] scanned_array;
+            return result;
         }
     }
}
diff --git a/stream_compaction/efficient.cu b/stream_compaction/efficient.cu
index 2db346e..47b709c 100644
--- a/stream_compaction/efficient.cu
+++ b/stream_compaction/efficient.cu
@@ -12,13 +12,106 @@ namespace StreamCompaction {
             return timer;
         }
 
+        __global__ void kernaldownsweep(int n, int d, int* input) {
+            int k = (blockIdx.x * blockDim.x) + threadIdx.x;
+            if (k >= n) return;
+
+            // 2 ^ d
+            int pow_2d = 1 << d;
+            // 2 ^ (d+1)
+            int pow_2d1 = 1 << (d + 1);
+
+            // at each stride, swap the left child's partial sum into the
+            // right child and accumulate the old right value
+            if (k % pow_2d1 == 0) {
+                int t = input[k + pow_2d - 1];
+                input[k + pow_2d - 1] = input[k + pow_2d1 - 1];
+                input[k + pow_2d1 - 1] += t;
+            }
+        }
+
+        __global__ void kernalupsweep(int n, int d, int* input) {
+            int k = (blockIdx.x * blockDim.x) + threadIdx.x;
+            if (k >= n) return;
+
+            // 2 ^ d
+            int pow_2d = 1 << d;
+            // 2 ^ (d+1)
+            int pow_2d1 = 1 << (d + 1);
+
+            // add the left child's partial sum into the right child
+            if (k % pow_2d1 == 0) {
+                input[k + pow_2d1 - 1] += input[k + pow_2d - 1];
+            }
+        }
+
         /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        void scan(int n, int *odata, const int *idata) {
+
+            // The idea is to build a balanced binary tree on the input data and
+            // sweep it to and from the root to compute the prefix sum. A binary
+            // tree with n leaves has log2(n) levels, and level d has 2^d nodes.
+
+            int padded_size = 1 << ilog2ceil(n);
+            int* temp_array = new int[padded_size];
+
+            // make sure to pad the temp array with 0s!
+            // to do: is this faster, or is cudaMemcpy-ing 0s faster? hmm
+            for (int i = 0; i < padded_size; i++) {
+                if (i < n) {
+                    temp_array[i] = idata[i];
+                }
+                else {
+                    temp_array[i] = 0;
+                }
+            }
+
+            // initialize a temporary buffer so the sweeps can work in place;
+            // intermediate array sizes need to be rounded up to the next power of two
+            int* temp_input;
+            cudaMalloc((void**)&temp_input, padded_size * sizeof(int));
+            // fill the temp input buffer with the padded array above
+            cudaMemcpy(temp_input, temp_array, padded_size * sizeof(int), cudaMemcpyHostToDevice);
+
+            // set up the blocks and grids
+            int blockSize = 64;
+            dim3 blocksPerGrid((padded_size + blockSize - 1) / blockSize);
+            dim3 threadsPerBlock(blockSize);
+
             timer().startGpuTimer();
-            // TODO
+
+            // The algorithm consists of two phases:
+            // the reduce phase (also known as the up-sweep phase)
+            // and the down-sweep phase.
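+            // Hand-worked illustration on [1 2 3 4 5 6 7 8] (exposition only):
+            //   after up-sweep:   [1 3 3 10 5 11 7 36]   (36 = total sum at the root)
+            //   zero the root:    [1 3 3 10 5 11 7  0]
+            //   after down-sweep: [0 1 3  6 10 15 21 28] (exclusive prefix sum)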
+
+            // up-sweep phase
+            for (int d = 0; d < ilog2ceil(n); d++) {
+                kernalupsweep<<<blocksPerGrid, threadsPerBlock>>>(padded_size, d, temp_input);
+            }
+
+            // replace the last index with 0
+            int zero = 0;
+            cudaMemcpy(temp_input + padded_size - 1, &zero, sizeof(int), cudaMemcpyHostToDevice);
+
+            // down-sweep phase
+            for (int d = ilog2ceil(n) - 1; d >= 0; d--) {
+                kernaldownsweep<<<blocksPerGrid, threadsPerBlock>>>(padded_size, d, temp_input);
+            }
+
             timer().endGpuTimer();
+
+            // copy from GPU to CPU
+            cudaMemcpy(temp_array, temp_input, padded_size * sizeof(int), cudaMemcpyDeviceToHost);
+
+            // copy into odata
+            for (int i = 0; i < n; i++) {
+                odata[i] = temp_array[i];
+            }
+
+            // cleanup
+            cudaFree(temp_input);
+            delete[] temp_array;
         }
 
         /**
@@ -31,10 +124,87 @@ namespace StreamCompaction {
         * @returns      The number of elements remaining after compaction.
         */
        int compact(int n, int *odata, const int *idata) {
-            timer().startGpuTimer();
-            // TODO
-            timer().endGpuTimer();
-            return -1;
+            // we want to set everything up for the scan here, because we don't
+            // want the setup to be inside the timed region.
+            // intermediate array sizes need to be rounded up to the next power of two.
+            int padded_size = 1 << ilog2ceil(n);
+            int* temp_array = new int[padded_size];
+
+            int* temp_bool; // stores the bool array on the gpu
+            cudaMalloc((void**)&temp_bool, padded_size * sizeof(int));
+
+            int* temp_scan_output; // stores the scanned bool array
+            cudaMalloc((void**)&temp_scan_output, padded_size * sizeof(int));
+
+            // make sure to pad the temp array with 0s!
+            // to do: is this faster, or is cudaMemcpy-ing 0s faster? hmm
+            for (int i = 0; i < padded_size; i++) {
+                if (i < n) {
+                    temp_array[i] = idata[i];
+                }
+                else {
+                    temp_array[i] = 0;
+                }
+            }
+
+            int* temp_input; // stores the padded input on the gpu
+            cudaMalloc((void**)&temp_input, padded_size * sizeof(int));
+            // fill with the padded data from above; copy all padded_size
+            // elements so the padding is initialized on the device too
+            cudaMemcpy(temp_input, temp_array, padded_size * sizeof(int), cudaMemcpyHostToDevice);
+
+            int* temp_output; // stores the output of the scatter on the gpu
+            cudaMalloc((void**)&temp_output, padded_size * sizeof(int));
+
+            // set up the blocks and grids
+            int blockSize = 128;
+            dim3 threadsPerBlock(blockSize);
+            dim3 blocksPerGrid((padded_size + blockSize - 1) / blockSize);
+
+            timer().startGpuTimer(); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+            // similar to the cpu version, we want to:
+            // fill the bool array with 0 where idata is 0, and 1 otherwise
+            // ================= MAP TO BOOL =======================
+            Common::kernMapToBoolean<<<blocksPerGrid, threadsPerBlock>>>(padded_size, temp_bool, temp_input);
+
+            // ================= SCAN ==============================
+            // The algorithm consists of two phases:
+            // the reduce phase (also known as the up-sweep phase)
+            // and the down-sweep phase.
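+            // Hand-worked illustration (exposition only): with temp_bool = [1 0 1 1],
+            // the exclusive scan gives [0 1 1 2], i.e. the output slot of each kept
+            // element, and (last scan value + last bool) = 3 elements kept in total.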
+
+            cudaMemcpy(temp_scan_output, temp_bool, padded_size * sizeof(int), cudaMemcpyDeviceToDevice);
+
+            // up-sweep phase
+            for (int d = 0; d < ilog2ceil(n); d++) {
+                kernalupsweep<<<blocksPerGrid, threadsPerBlock>>>(padded_size, d, temp_scan_output);
+            }
+
+            // replace the last index with 0
+            int zero = 0;
+            cudaMemcpy(temp_scan_output + padded_size - 1, &zero, sizeof(int), cudaMemcpyHostToDevice);
+
+            // down-sweep phase
+            for (int d = ilog2ceil(n) - 1; d >= 0; d--) {
+                kernaldownsweep<<<blocksPerGrid, threadsPerBlock>>>(padded_size, d, temp_scan_output);
+            }
+
+            // ================= SCATTER ===========================
+            Common::kernScatter<<<blocksPerGrid, threadsPerBlock>>>(padded_size, temp_output, temp_input, temp_bool, temp_scan_output);
+
+            timer().endGpuTimer(); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+            // copy the compacted data from the gpu back to the cpu;
+            // odata only holds n elements, so copy at most n of them
+            cudaMemcpy(odata, temp_output, n * sizeof(int), cudaMemcpyDeviceToHost);
+            // read back the number of non-zero elements: the scan is exclusive,
+            // so the count is the last scanned value plus the last boolean
+            // (the last boolean is always 0 whenever the array was padded)
+            int last_scan = 0;
+            int last_bool = 0;
+            cudaMemcpy(&last_scan, temp_scan_output + padded_size - 1, sizeof(int), cudaMemcpyDeviceToHost);
+            cudaMemcpy(&last_bool, temp_bool + padded_size - 1, sizeof(int), cudaMemcpyDeviceToHost);
+            int result = last_scan + last_bool;
+
+            // cleanup
+            cudaFree(temp_output);
+            cudaFree(temp_input);
+            cudaFree(temp_bool);
+            cudaFree(temp_scan_output);
+            delete[] temp_array;
+
+            return result;
         }
     }
}
diff --git a/stream_compaction/naive.cu b/stream_compaction/naive.cu
index 4308876..1143d83 100644
--- a/stream_compaction/naive.cu
+++ b/stream_compaction/naive.cu
@@ -11,15 +11,74 @@ namespace StreamCompaction {
             static PerformanceTimer timer;
             return timer;
         }
-        // TODO: __global__
+
+        __global__ void kernalNaiveScan(int n, int d, int* input, int* output) {
+            int k = (blockIdx.x * blockDim.x) + threadIdx.x;
+            if (k >= n) return;
+
+            // if k >= 2^(d-1) <-- see Example 2 in GPU Gems 3, Ch. 39
+            if (k >= (1 << (d - 1))) {
+                output[k] = input[k - (1 << (d - 1))] + input[k];
+            }
+            else {
+                output[k] = input[k];
+            }
+        }
+
+        __global__ void kernalInc2Exc(int n, int* input, int* output) {
+            int k = (blockIdx.x * blockDim.x) + threadIdx.x;
+            if (k >= n) return;
+
+            // shift everything one slot to the right;
+            // the first element of an exclusive scan is 0
+            if (k == 0) {
+                output[k] = 0;
+            }
+            else {
+                output[k] = input[k - 1];
+            }
+        }
 
         /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        void scan(int n, int *odata, const int *idata) {
+            // set up the blocks and grids
+            int blockSize = 64;
+            dim3 blocksPerGrid((n + blockSize - 1) / blockSize);
+            dim3 threadsPerBlock(blockSize);
+
+            // initialize two temporary buffers to ping-pong between
+            int* temp_input;
+            cudaMalloc((void**)&temp_input, n * sizeof(int));
+            // fill the temp input buffer with the original input
+            cudaMemcpy(temp_input, idata, n * sizeof(int), cudaMemcpyHostToDevice);
+
+            int* temp_output;
+            cudaMalloc((void**)&temp_output, n * sizeof(int));
+
             timer().startGpuTimer();
-            // TODO
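+            // Hand-worked illustration on [1 2 3 4] (exposition only):
+            //   d=1 (offset 1): [1 3 5 7]
+            //   d=2 (offset 2): [1 3 6 10]  <- inclusive scan
+            //   shifted right:  [0 1 3 6]   <- exclusive scan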
+            // iterate from d = 1 to d = ilog2ceil(n)
+            for (int d = 1; d <= ilog2ceil(n); d++) {
+                // each pass scans in parallel from the input buffer
+                // to the output buffer
+                kernalNaiveScan<<<blocksPerGrid, threadsPerBlock>>>(n, d, temp_input, temp_output);
+
+                // remember to swap the buffers!
+                std::swap(temp_input, temp_output);
+            }
+
+            // we want an exclusive scan, so we have to convert
+            kernalInc2Exc<<<blocksPerGrid, threadsPerBlock>>>(n, temp_input, temp_output);
+
             timer().endGpuTimer();
+
+            // now write everything to our real output buffer
+            cudaMemcpy(odata, temp_output, n * sizeof(int), cudaMemcpyDeviceToHost);
+
+            // cleanup
+            cudaFree(temp_input);
+            cudaFree(temp_output);
         }
     }
}
diff --git a/stream_compaction/thrust.cu b/stream_compaction/thrust.cu
index 1def45e..19d02b5 100644
--- a/stream_compaction/thrust.cu
+++ b/stream_compaction/thrust.cu
@@ -17,12 +17,27 @@ namespace StreamCompaction {
         /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
+
+        // used help from: https://docs.nvidia.com/cuda/thrust/index.html#:~:text=As%20the%20names%20suggest%2C%20host_vector%20is%20stored%20in,any%20data%20type%29%20that%20can%20be%20resized%20dynamically.
        void scan(int n, int *odata, const int *idata) {
+
+            // create a host vector
+            thrust::host_vector<int> host_vector(n);
+            // initialize its elements from idata
+            thrust::copy(idata, idata + n, host_vector.begin());
+
+            // create a device vector from the host vector
+            thrust::device_vector<int> dv_in = host_vector;
+            thrust::device_vector<int> dv_out(n);
+
             timer().startGpuTimer();
-            // TODO use `thrust::exclusive_scan`
-            // example: for device_vectors dv_in and dv_out:
-            // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
+
+            thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
+
             timer().endGpuTimer();
+
+            thrust::copy(dv_out.begin(), dv_out.end(), odata);
         }
     }
}