Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
84 changes: 78 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,84 @@ CUDA Stream Compaction

**University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 2**

* (TODO) YOUR NAME HERE
* (TODO) [LinkedIn](), [personal website](), [twitter](), etc.
* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab)
* Haoyu Sui
* [LinkedIn](http://linkedin.com/in/haoyu-sui-721284192)
* Tested on: Windows 10, i5-9600K @ 3.70GHz 16GB, RTX 2070 SUPER 8GB
* SM:7.5

### (TODO: Your README)
### Features
* CPU Scan & Stream Compaction
* Naive GPU Scan Algorithm
* Work-Efficient GPU Scan & Stream Compaction
* Using Thrust's Implementation

Include analysis, etc. (Remember, this is public, so don't put
anything here that you don't want to share with the world.)
### Analysis

**Rough optimization of each implementation**

To find the block size that gives each method its best performance, I tried several different block sizes for each method: 32, 64, 128, 256, and 512. The results are shown in the table below (array size is 2^20).

![](img/BlockSize.png)

**Performance analysis**

I chose a block size of 128 and then measured the performance of each method across a range of array sizes. The figure shows that the GPU's performance advantage becomes more pronounced as the array size grows. Among the three GPU methods, Thrust Scan and Work-Efficient Scan outperform Naive Scan.

![](img/ArraySize.png)


### Output in cmd
```
****************
** SCAN TESTS **
****************
[ 8 9 43 42 23 4 8 38 23 10 27 11 44 ... 45 0 ]
==== cpu scan, power-of-two ====
elapsed time: 3.9358ms (std::chrono Measured)
[ 0 8 17 60 102 125 129 137 175 198 208 235 246 ... 25659338 25659383 ]
==== cpu scan, non-power-of-two ====
elapsed time: 1.4142ms (std::chrono Measured)
[ 0 8 17 60 102 125 129 137 175 198 208 235 246 ... 25659264 25659282 ]
passed
==== naive scan, power-of-two ====
elapsed time: 0.485664ms (CUDA Measured)
passed
==== naive scan, non-power-of-two ====
elapsed time: 0.485152ms (CUDA Measured)
passed
==== work-efficient scan, power-of-two ====
elapsed time: 0.219872ms (CUDA Measured)
passed
==== work-efficient scan, non-power-of-two ====
elapsed time: 0.19888ms (CUDA Measured)
passed
==== thrust scan, power-of-two ====
elapsed time: 0.204576ms (CUDA Measured)
passed
==== thrust scan, non-power-of-two ====
elapsed time: 0.214432ms (CUDA Measured)
passed

*****************************
** STREAM COMPACTION TESTS **
*****************************
[ 1 1 1 2 1 1 0 1 2 0 1 3 1 ... 0 0 ]
==== cpu compact without scan, power-of-two ====
elapsed time: 2.5239ms (std::chrono Measured)
[ 1 1 1 2 1 1 1 2 1 3 1 2 3 ... 1 1 ]
passed
==== cpu compact without scan, non-power-of-two ====
elapsed time: 2.2133ms (std::chrono Measured)
[ 1 1 1 2 1 1 1 2 1 3 1 2 3 ... 1 1 ]
passed
==== cpu compact with scan ====
elapsed time: 8.8561ms (std::chrono Measured)
[ 1 1 1 2 1 1 1 2 1 3 1 2 3 ... 1 1 ]
passed
==== work-efficient compact, power-of-two ====
elapsed time: 1.19734ms (CUDA Measured)
passed
==== work-efficient compact, non-power-of-two ====
elapsed time: 0.7928ms (CUDA Measured)
passed
```
Binary file added img/ArraySize.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added img/BlockSize.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
2 changes: 1 addition & 1 deletion src/main.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
#include <stream_compaction/thrust.h>
#include "testing_helpers.hpp"

const int SIZE = 1 << 8; // feel free to change the size of array
const int SIZE = 1 << 20; // feel free to change the size of array
const int NPOT = SIZE - 3; // Non-Power-Of-Two
int *a = new int[SIZE];
int *b = new int[SIZE];
Expand Down
17 changes: 16 additions & 1 deletion stream_compaction/common.cu
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,12 @@ namespace StreamCompaction {
*/
/**
 * Maps each element of idata to a 0/1 flag in bools:
 * 1 where the element is non-zero, 0 where it is zero.
 * Expects a 1D launch covering at least n threads; extra threads exit.
 */
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) {
        bools[tid] = (idata[tid] != 0) ? 1 : 0;
    }
}

/**
Expand All @@ -33,7 +39,16 @@ namespace StreamCompaction {
/**
 * Scatters the flagged elements of idata into odata.
 * bools[i] == 1 marks element i for keeping; indices is the exclusive
 * scan of bools and gives each kept element its output position.
 * Expects a 1D launch covering at least n threads; extra threads exit.
 */
__global__ void kernScatter(int n, int *odata,
        const int *idata, const int *bools, const int *indices) {
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (index >= n) {
        return;
    }
    if (bools[index] == 1) {
        // indices[index] is this element's compacted output slot.
        odata[indices[index]] = idata[index];
    }
}
}
}
2 changes: 2 additions & 0 deletions stream_compaction/common.h
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)

#define blockSize 128

/**
* Check for CUDA errors; print and exit if there was a problem.
*/
Expand Down
54 changes: 51 additions & 3 deletions stream_compaction/cpu.cu
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,11 @@ namespace StreamCompaction {
/**
 * CPU exclusive prefix-sum (scan): odata[0] = 0 and
 * odata[i] = odata[i-1] + idata[i-1] for i in [1, n).
 * Timed with the CPU timer; safe for n <= 0 (no writes).
 */
void scan(int n, int *odata, const int *idata) {
    timer().startCpuTimer();
    if (n > 0) {  // guard: don't write odata[0] for an empty array
        odata[0] = 0;
        for (int i = 1; i < n; ++i) {
            odata[i] = odata[i - 1] + idata[i - 1];
        }
    }
    timer().endCpuTimer();
}

Expand All @@ -31,8 +36,17 @@ namespace StreamCompaction {
/**
 * CPU stream compaction without scan: copies every non-zero element
 * of idata into odata, preserving order.
 * @returns the number of elements remaining after compaction.
 */
int compactWithoutScan(int n, int *odata, const int *idata) {
    timer().startCpuTimer();
    int count = 0;
    for (int i = 0; i < n; ++i) {
        if (idata[i] != 0) {
            odata[count] = idata[i];
            ++count;
        }
    }
    timer().endCpuTimer();
    return count;  // removed dead `return -1;` left over from the stub
}

/**
Expand All @@ -41,10 +55,44 @@ namespace StreamCompaction {
* @returns the number of elements remaining after compaction.
*/
/**
 * CPU stream compaction using the map/scan/scatter decomposition
 * (mirrors the GPU algorithm): map to 0/1 flags, exclusive-scan the
 * flags to get output indices, then scatter the kept elements.
 * @returns the number of elements remaining after compaction.
 */
int compactWithScan(int n, int *odata, const int *idata) {
    int *mappedArr = new int[n];
    int *scannedArr = new int[n];
    timer().startCpuTimer();

    // Map: 1 for elements to keep, 0 otherwise.
    for (int i = 0; i < n; ++i) {
        mappedArr[i] = (idata[i] != 0) ? 1 : 0;
    }

    // Exclusive scan of the flags gives each kept element's output slot.
    if (n > 0) {  // guard: scannedArr[0] is invalid for an empty array
        scannedArr[0] = 0;
        for (int i = 1; i < n; ++i) {
            scannedArr[i] = mappedArr[i - 1] + scannedArr[i - 1];
        }
    }

    // Scatter the kept elements to their scanned positions.
    int oCount = 0;
    for (int i = 0; i < n; ++i) {
        if (mappedArr[i] != 0) {
            odata[scannedArr[i]] = idata[i];
            ++oCount;
        }
    }

    timer().endCpuTimer();

    // Fix: release the temporaries (they previously leaked) and drop the
    // dead `return -1;` left over from the stub.
    delete[] mappedArr;
    delete[] scannedArr;
    return oCount;
}
}
}
144 changes: 141 additions & 3 deletions stream_compaction/efficient.cu
Original file line number Diff line number Diff line change
Expand Up @@ -12,13 +12,83 @@ namespace StreamCompaction {
return timer;
}

/**
 * One level of the work-efficient scan's up-sweep (parallel reduction).
 * At depth d, each active thread owns a stride-2^(d+1) tree node and
 * adds its left child's partial sum into the node.
 * n_pot must be a power of two; launch with one thread per active node.
 */
__global__ void kernUpSweep(int n_pot, int* data, int d)
{
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;

    int stride = 1 << (d + 1);  // distance between nodes at this depth
    int k = index * stride;
    if (k >= n_pot) {
        return;
    }

    // Fold the left child (offset 2^d - 1) into this node (offset 2^(d+1) - 1).
    // (The original computed 1 << (d + 1) twice; reuse `stride`.)
    int half = 1 << d;
    data[k + stride - 1] += data[k + half - 1];
}

/**
 * One level of the work-efficient scan's down-sweep.
 * At depth d, each active thread swaps its left child's value with the
 * node value and accumulates, propagating exclusive prefix sums down
 * the tree. The root must be zeroed before the first (deepest) level.
 * n_pot must be a power of two; launch with one thread per active node.
 */
__global__ void kernDownSweep(int n_pot, int* data, int d)
{
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;

    int stride = 1 << (d + 1);  // distance between nodes at this depth
    int k = index * stride;
    if (k >= n_pot) {
        return;
    }

    // Left child gets the node's value; node gets left child + node.
    // (The original computed 1 << (d + 1) twice; reuse `stride`.)
    int half = 1 << d;
    int t = data[k + half - 1];
    data[k + half - 1] = data[k + stride - 1];
    data[k + stride - 1] += t;
}

/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
/**
 * Work-efficient GPU exclusive scan of idata into odata.
 * Pads the input with zeros up to the next power of two (the tree-based
 * sweeps require it), runs up-sweep then down-sweep in place on the
 * device, and copies the first n results back.
 */
void scan(int n, int *odata, const int *idata) {
    int *dev_data;

    // Round the working size up to the next power of two.
    int logValue = ilog2ceil(n);
    int n_pot = 1 << logValue;

    cudaMalloc((void**)&dev_data, n_pot * sizeof(int));
    checkCUDAError("cudaMalloc dev_data failed!");

    // Zero-fill so the padding elements don't affect the sums.
    cudaMemset(dev_data, 0, n_pot * sizeof(int));
    checkCUDAError("cudaMemset dev_data failed!");

    cudaMemcpy(dev_data, idata, n * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("cudaMemcpy from idata to dev_data failed!");

    timer().startGpuTimer();

    // Up-sweep: build partial sums in place.
    // Grid size uses an integer shift instead of the original
    // floating-point pow(2, d + 1), and reuses the precomputed logValue.
    for (int d = 0; d < logValue; ++d) {
        int activeNodes = n_pot >> (d + 1);  // threads needed at this depth
        dim3 blocksPerGrid((activeNodes + blockSize - 1) / blockSize);
        kernUpSweep<<<blocksPerGrid, blockSize>>>(n_pot, dev_data, d);
    }

    // Down-sweep: zero the root, then propagate exclusive prefix sums.
    cudaMemset(dev_data + n_pot - 1, 0, sizeof(int));
    for (int d = logValue - 1; d >= 0; --d) {
        int activeNodes = n_pot >> (d + 1);
        dim3 blocksPerGrid((activeNodes + blockSize - 1) / blockSize);
        kernDownSweep<<<blocksPerGrid, blockSize>>>(n_pot, dev_data, d);
    }

    timer().endGpuTimer();

    cudaMemcpy(odata, dev_data, n * sizeof(int), cudaMemcpyDeviceToHost);
    checkCUDAError("cudaMemcpy from dev_data to odata failed!");

    cudaFree(dev_data);
}

/**
Expand All @@ -31,10 +101,78 @@ namespace StreamCompaction {
* @returns The number of elements remaining after compaction.
*/
/**
 * GPU stream compaction: keeps the non-zero elements of idata, in order.
 * Pipeline: map to 0/1 flags -> exclusive scan of flags (up/down sweep)
 * -> scatter flagged elements to their scanned indices.
 * @returns The number of elements remaining after compaction.
 */
int compact(int n, int *odata, const int *idata) {
    int* dev_idata;
    int* dev_boolData;
    int* dev_indices;
    int* dev_odata;

    // Round the working size up to the next power of two for the sweeps.
    int logValue = ilog2ceil(n);
    int n_pot = 1 << logValue;

    cudaMalloc((void**)&dev_idata, n_pot * sizeof(int));
    checkCUDAError("cudaMalloc dev_idata failed!");

    cudaMalloc((void**)&dev_boolData, n_pot * sizeof(int));
    checkCUDAError("cudaMalloc dev_boolData failed!");

    cudaMalloc((void**)&dev_indices, n_pot * sizeof(int));
    checkCUDAError("cudaMalloc dev_indices failed!");

    // Zero padding so extra elements map to flag 0 and are never kept.
    cudaMemset(dev_idata, 0, n_pot * sizeof(int));
    checkCUDAError("cudaMemset dev_idata failed!");

    cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("cudaMemcpy from idata to dev_idata failed!");

    timer().startGpuTimer();

    dim3 fullBlocksPerGrid((n_pot + blockSize - 1) / blockSize);

    // Map: 1 for non-zero elements, 0 otherwise.
    Common::kernMapToBoolean<<<fullBlocksPerGrid, blockSize>>>(n_pot, dev_boolData, dev_idata);

    cudaMemcpy(dev_indices, dev_boolData, n_pot * sizeof(int), cudaMemcpyDeviceToDevice);
    checkCUDAError("cudaMemcpy from dev_boolData to dev_indices failed!");

    // Exclusive scan of the flags: up-sweep, zero the root, down-sweep.
    // Grid sizes use integer shifts instead of floating-point pow().
    for (int d = 0; d < logValue; ++d) {
        int activeNodes = n_pot >> (d + 1);
        dim3 blocksPerGrid((activeNodes + blockSize - 1) / blockSize);
        kernUpSweep<<<blocksPerGrid, blockSize>>>(n_pot, dev_indices, d);
    }
    cudaMemset(dev_indices + n_pot - 1, 0, sizeof(int));
    for (int d = logValue - 1; d >= 0; --d) {
        int activeNodes = n_pot >> (d + 1);
        dim3 blocksPerGrid((activeNodes + blockSize - 1) / blockSize);
        kernDownSweep<<<blocksPerGrid, blockSize>>>(n_pot, dev_indices, d);
    }

    // Count fix: the exclusive scan's last value alone undercounts by one
    // whenever the final element is itself selected (possible when n is a
    // power of two, since padding flags are 0 otherwise). The true count
    // is lastIndex + lastBool.
    int lastIndex = 0;
    int lastBool = 0;
    cudaMemcpy(&lastIndex, dev_indices + n_pot - 1, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&lastBool, dev_boolData + n_pot - 1, sizeof(int), cudaMemcpyDeviceToHost);
    int arrayCount = lastIndex + lastBool;

    cudaMalloc((void**)&dev_odata, arrayCount * sizeof(int));
    checkCUDAError("cudaMalloc dev_odata failed!");

    // Scatter the kept elements into the compacted output.
    Common::kernScatter<<<fullBlocksPerGrid, blockSize>>>(n_pot, dev_odata, dev_idata, dev_boolData, dev_indices);

    timer().endGpuTimer();

    // Removed the stray `return -1;` that made everything below
    // unreachable (no copy-back, leaked all device buffers, wrong count).
    cudaMemcpy(odata, dev_odata, arrayCount * sizeof(int), cudaMemcpyDeviceToHost);
    checkCUDAError("cudaMemcpy from dev_odata to odata failed!");

    cudaFree(dev_idata);
    cudaFree(dev_boolData);
    cudaFree(dev_indices);
    cudaFree(dev_odata);

    return arrayCount;
}
}
}
Loading