diff --git a/CMakeLists.txt b/CMakeLists.txt
index f654c9e..2930067 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3,6 +3,7 @@ cmake_minimum_required(VERSION 3.1)
 project(cis565_stream_compaction_test)
 
 set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") # Enable C++11 for host code
 
 set(CMAKE_CXX_STANDARD 11)
diff --git a/README.md b/README.md
index 0e38ddb..5e390e5 100644
--- a/README.md
+++ b/README.md
@@ -3,12 +3,92 @@ CUDA Stream Compaction
 
 **University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 2**
 
-* (TODO) YOUR NAME HERE
-  * (TODO) [LinkedIn](), [personal website](), [twitter](), etc.
-* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab)
+* Edward Atter
+  * [LinkedIn](https://www.linkedin.com/in/atter/)
+  * Tested on: Linux Mint 18.3 Sylvia (4.13.0-41-generic), Ryzen 7 2700x @ 3.7 GHz (base clock) 16GB, GTX 1070 TI 8GB GDDR5 (Personal)
+  * CUDA 9
 
-### (TODO: Your README)
+## Overview
 
-Include analysis, etc. (Remember, this is public, so don't put
-anything here that you don't want to share with the world.)
+This project implements array scanning and stream compaction. Below are some diagrams from GPU Gems.
+![Scanning implementation](img/figure-39-2.jpg)
+
+Compaction is useful for shrinking a data set when only the non-zero (truthy) values matter. Think of sparse arrays, or considering only the objects that actually collide.
+
+## Performance
+
+#### Methodology
+
+Memory operations such as `malloc` or `memset` were excluded from the results. In the case of compaction, only the final step (scatter) is timed. Any preliminary data formatting (for example to get the boolean array or scanning to get the proper indices) is not included in the time. Unless otherwise stated, a block size of 1024 was used throughout the analysis.
+
+The timing data displayed below is an average across two runs.
Ideally, there would be a much higher number of trials, though in practice the timings did not change much. + +#### Analysis + +Large block sizes perform the best. This is likely because each thread does very little work. Specifically, a block size of 1024 was chosen. See the graphs below for a comparison. + +![Time vs Array Size (BS = 256)](img/time-vs-array-bs-256.png) ![Time vs Array Size (BS = 256)](img/time-vs-array-bs-256-zoom.png) + +![Time vs Array Size (BS = 1024)](img/time-vs-array-bs-1024.png) ![Time vs Array Size (BS = 1024)](img/time-vs-array-bs-1024-zoom.png) + +With the exception of thrust, both GPU implementations improve relative to the CPU as the array size increases. The naive solution is never worth using, as it is always the slowest. When the array size grows extremely large, the efficient implementation beats even the standard thrust library. Any performance bottlenecks are IO related. It can be improved significantly by using shared instead of global memory within blocks. + +It should be noted that thrust is really at an unfair advantage in these tests. Memory allocations are not included in the performance tests for any of the custom built solutions. The timing for thrust, however, includes all the necessary memory allocations. + +On examination, in addition to watching `top` and `nvidia-smi`, I believe thrust uses the CPU for small array sizes, and switches to utilizing the GPU when the array size reaches a sufficient length. + +## Program Output + + **************** + ** SCAN TESTS ** + **************** + [ 40 41 8 46 3 10 25 38 24 42 12 31 35 ... 16 0 ] + ==== cpu scan, power-of-two ==== + elapsed time: 0.14779ms (std::chrono Measured) + [ 0 40 81 89 135 138 148 173 211 235 277 289 320 ... 6428057 6428073 ] + ==== cpu scan, non-power-of-two ==== + elapsed time: 0.14218ms (std::chrono Measured) + [ 0 40 81 89 135 138 148 173 211 235 277 289 320 ... 
6427957 6427981 ] + passed + ==== naive scan, power-of-two ==== + elapsed time: 0.163936ms (CUDA Measured) + passed + ==== naive scan, non-power-of-two ==== + elapsed time: 0.109696ms (CUDA Measured) + passed + ==== work-efficient scan, power-of-two ==== + elapsed time: 0.131392ms (CUDA Measured) + passed + ==== work-efficient scan, non-power-of-two ==== + elapsed time: 0.119008ms (CUDA Measured) + passed + ==== thrust scan, power-of-two ==== + elapsed time: 0.221088ms (CUDA Measured) + passed + ==== thrust scan, non-power-of-two ==== + elapsed time: 0.22096ms (CUDA Measured) + passed + + ***************************** + ** STREAM COMPACTION TESTS ** + ***************************** + [ 2 1 0 2 1 2 1 2 2 2 0 1 1 ... 0 0 ] + ==== cpu compact without scan, power-of-two ==== + elapsed time: 1.05048ms (std::chrono Measured) + [ 2 1 2 1 2 1 2 2 2 1 1 3 3 ... 3 3 ] + passed + ==== cpu compact without scan, non-power-of-two ==== + elapsed time: 1.05846ms (std::chrono Measured) + [ 2 1 2 1 2 1 2 2 2 1 1 3 3 ... 2 3 ] + passed + ==== cpu compact with scan ==== + elapsed time: 1.09453ms (std::chrono Measured) + [ 2 1 2 1 2 1 2 2 2 1 1 3 3 ... 
3 3 ] + passed + ==== work-efficient compact, power-of-two ==== + elapsed time: 0.022336ms (CUDA Measured) + passed + ==== work-efficient compact, non-power-of-two ==== + elapsed time: 0.024768ms (CUDA Measured) + passed diff --git a/img/time-vs-array-bs-1024-zoom.png b/img/time-vs-array-bs-1024-zoom.png new file mode 100644 index 0000000..22bf6c1 Binary files /dev/null and b/img/time-vs-array-bs-1024-zoom.png differ diff --git a/img/time-vs-array-bs-1024.png b/img/time-vs-array-bs-1024.png new file mode 100644 index 0000000..502a224 Binary files /dev/null and b/img/time-vs-array-bs-1024.png differ diff --git a/img/time-vs-array-bs-256-zoom.png b/img/time-vs-array-bs-256-zoom.png new file mode 100644 index 0000000..db7a06c Binary files /dev/null and b/img/time-vs-array-bs-256-zoom.png differ diff --git a/img/time-vs-array-bs-256.png b/img/time-vs-array-bs-256.png new file mode 100644 index 0000000..d86cb0b Binary files /dev/null and b/img/time-vs-array-bs-256.png differ diff --git a/src/main.cpp b/src/main.cpp index 1850161..0d9ab4a 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -13,7 +13,7 @@ #include #include "testing_helpers.hpp" -const int SIZE = 1 << 8; // feel free to change the size of array +const int SIZE = 1 << 18; // feel free to change the size of array const int NPOT = SIZE - 3; // Non-Power-Of-Two int *a = new int[SIZE]; int *b = new int[SIZE]; @@ -147,7 +147,7 @@ int main(int argc, char* argv[]) { //printArray(count, c, true); printCmpLenResult(count, expectedNPOT, b, c); - system("pause"); // stop Win32 console from closing on exit + //system("pause"); // stop Win32 console from closing on exit delete[] a; delete[] b; delete[] c; diff --git a/stream_compaction/CMakeLists.txt b/stream_compaction/CMakeLists.txt index cdbef77..e31ca3c 100644 --- a/stream_compaction/CMakeLists.txt +++ b/stream_compaction/CMakeLists.txt @@ -13,5 +13,5 @@ set(SOURCE_FILES cuda_add_library(stream_compaction ${SOURCE_FILES} - OPTIONS -arch=sm_20 + OPTIONS -arch=sm_30 ) 
diff --git a/stream_compaction/common.cu b/stream_compaction/common.cu
index 8fc0211..d54e81a 100644
--- a/stream_compaction/common.cu
+++ b/stream_compaction/common.cu
@@ -23,7 +23,14 @@ namespace StreamCompaction {
          * which map to 0 will be removed, and elements which map to 1 will be kept.
          */
         __global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
-            // TODO
+            int i = threadIdx.x + blockIdx.x * blockDim.x;
+            if (i >= n) { return; }
+
+            if (idata[i] != 0) {
+                bools[i] = 1;
+            } else {
+                bools[i] = 0;
+            }
         }
 
         /**
@@ -32,7 +39,25 @@
          */
         __global__ void kernScatter(int n, int *odata, const int *idata,
                 const int *bools, const int *indices) {
-            // TODO
+            int i = threadIdx.x + blockIdx.x * blockDim.x;
+            // Bounds guard (cf. kernMapToBoolean): threads in the partial last block must not read bools/indices past n.
+            if (i >= n) { return; }
+            if (bools[i] == 1) {
+                int outIndex = indices[i];
+                odata[outIndex] = idata[i];
+            }
+        }
+
+        __global__ void kernConvertScanToExclusive(int n, int exclusiveScan[], const int inclusiveScan[]) {
+            int idx = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (idx >= n) {
+                return;
+            } else if (idx >= 1) {
+                exclusiveScan[idx] = inclusiveScan[idx - 1];
+                return;
+            }
+
+            exclusiveScan[0] = 0;
         }
     }
 }
diff --git a/stream_compaction/common.h b/stream_compaction/common.h
index 99a1b04..9c74268 100644
--- a/stream_compaction/common.h
+++ b/stream_compaction/common.h
@@ -12,6 +12,7 @@
 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
 #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
 
+#define BLOCK_SIZE 1024
 
 /**
  * Check for CUDA errors; print and exit if there was a problem.
@@ -37,6 +38,8 @@ namespace StreamCompaction {
         __global__ void kernScatter(int n, int *odata,
                 const int *idata, const int *bools, const int *indices);
 
+        __global__ void kernConvertScanToExclusive(int n, int exclusiveScan[], const int inclusiveScan[]);
+
         /**
          * This class is used for timing the performance
          * Uncopyable and unmovable
@@ -114,6 +117,8 @@
             PerformanceTimer& operator=(const PerformanceTimer&) = delete;
             PerformanceTimer& operator=(PerformanceTimer&&) = delete;
 
+
+    private:
             cudaEvent_t event_start = nullptr;
             cudaEvent_t event_end = nullptr;
diff --git a/stream_compaction/cpu.cu b/stream_compaction/cpu.cu
index 05ce667..8e18fca 100644
--- a/stream_compaction/cpu.cu
+++ b/stream_compaction/cpu.cu
@@ -19,10 +19,19 @@ namespace StreamCompaction {
          */
         void scan(int n, int *odata, const int *idata) {
             timer().startCpuTimer();
-            // TODO
+            scanNoTimer(n, odata, idata);
             timer().endCpuTimer();
         }
 
+        // Exclusive prefix sum without the timer, so compactWithScan can reuse it.
+        void scanNoTimer(int n, int *odata, const int *idata) {
+            odata[0] = 0; // Add identity
+            int sum = 0;
+            for (int i = 0; i < n - 1; i++){
+                sum += idata[i];
+                odata[i + 1] = sum;
+            }
+        }
+
         /**
          * CPU stream compaction without using the scan function.
          *
@@ -30,9 +39,16 @@
         int compactWithoutScan(int n, int *odata, const int *idata) {
             timer().startCpuTimer();
-            // TODO
+            int index = 0;
+
+            for (int i = 0; i < n; i++) {
+                if (idata[i] != 0){
+                    odata[index] = idata[i];
+                    index ++;
+                }
+            }
             timer().endCpuTimer();
-            return -1;
+            return index;
         }
 
         /**
@@ -41,10 +57,27 @@
          * @returns the number of elements remaining after compaction.
          */
         int compactWithScan(int n, int *odata, const int *idata) {
-            timer().startCpuTimer();
-            // TODO
+            int idx = 0;
+            int booleanArray[n]; // NOTE(review): VLA is a compiler extension; std::vector would be portable
+            timer().startCpuTimer();
+            for (int i = 0; i < n; i++) {
+                if (idata[i] != 0) {
+                    booleanArray[i] = 1;
+                } else {
+                    booleanArray[i] = 0;
+                }
+            }
+            int indexArray[n];
+            scanNoTimer(n, indexArray, booleanArray);
+            // Finally, scatter
+            for (int i = 0; i < n; i++) {
+                if (idata[i] != 0) {
+                    idx = indexArray[i];
+                    odata[idx] = idata[i];
+                }
+            }
             timer().endCpuTimer();
-            return -1;
+            return indexArray[n - 1] + booleanArray[n - 1]; // last index + last flag; correct even for an all-zero input
         }
     }
 }
diff --git a/stream_compaction/cpu.h b/stream_compaction/cpu.h
index 236ce11..47dafdf 100644
--- a/stream_compaction/cpu.h
+++ b/stream_compaction/cpu.h
@@ -8,6 +8,8 @@ namespace StreamCompaction {
         void scan(int n, int *odata, const int *idata);
 
+        void scanNoTimer(int n, int *odata, const int *idata);
+
         int compactWithoutScan(int n, int *odata, const int *idata);
 
         int compactWithScan(int n, int *odata, const int *idata);
diff --git a/stream_compaction/efficient.cu b/stream_compaction/efficient.cu
index 36c5ef2..2fdbca7 100644
--- a/stream_compaction/efficient.cu
+++ b/stream_compaction/efficient.cu
@@ -5,6 +5,9 @@
 namespace StreamCompaction {
     namespace Efficient {
+
+        int * dev_data;
+
         using StreamCompaction::Common::PerformanceTimer;
         PerformanceTimer& timer()
         {
@@ -12,13 +15,86 @@
             return timer;
         }
 
+        __global__ void kernUpSweep(int n, int pow, int pow1, int data[]) {
+            int idx = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (idx >= n) {
+                return;
+            }
+
+            int i = idx * pow1;
+            if (i < n) {
+                data[i + pow1 - 1] += data[i + pow - 1];
+            }
+        }
+
+        __global__ void kernDownSweep(int n, int pow, int pow1, int data[]) {
+            int idx = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (idx >= n) { return; }
+
+            int i = idx * pow1;
+            if (i < n) {
+                // Swap and sum
+                int aux = data[i + pow - 1];
+                data[i + pow - 1] = data[i + pow1 - 1];
+                data[i + pow1 - 1] += aux;
+            }
+        }
+
         /**
          * Performs prefix-sum (aka scan) on idata,
storing the result into odata.
          */
         void scan(int n, int *odata, const int *idata) {
+            int ceil = ilog2ceil(n);
+            int ceilN = 1 << ceil;
+
+            cudaMalloc((void**) &dev_data, ceilN * sizeof(int));
+            checkCUDAError("malloc dev_data failed");
+            // Zero the FULL padded buffer: the sweeps touch all ceilN slots,
+            // so the padding in [n, ceilN) must be 0 rather than garbage.
+            cudaMemset(dev_data, 0, ceilN * sizeof(int));
+            checkCUDAError("cudaMemset to clear array failed");
+
+            cudaMemcpy(dev_data, idata, n * sizeof(int), cudaMemcpyHostToDevice);
+            checkCUDAError("cudaMemcpy input host to device failed");
+            // (the scan runs in place in dev_data; no separate output buffer)
+
+            int pow, pow1, blocksPerGrid;
+
             timer().startGpuTimer();
-            // TODO
+            // Need two separate kernels, one for upsweep and one for down to ensure everything stays in sync
+            // Can we just use sync_threads? No, because there are potentially multiple blocks
+            // 1. upsweep (note it updates in place, hopefully this is okay? Just summing)
+            // 2. Reset end of array to 0
+            // 3. Downsweep
+            for (int d = 0; d < ceil; d++) {
+                //pow = std::pow(2, d);
+                //pow1 = std::pow(2, d + 1);
+                pow = 1 << d;
+                pow1 = 1 << (d + 1);
+                blocksPerGrid = (ceilN / pow1 + BLOCK_SIZE - 1) / BLOCK_SIZE;
+                kernUpSweep<<< blocksPerGrid, BLOCK_SIZE >>>(ceilN, pow, pow1, dev_data);
+                checkCUDAError("kernUpSweep failed");
+            }
+
+            // Reset last value
+            // The downsweep needs the identity at the root of the sum tree,
+            // so zero the final element before sweeping back down.
+            cudaMemset(dev_data + ceilN - 1, 0, sizeof(int));
+            checkCUDAError("cudaMemset zero failed");
+
+            for (int d = ceil - 1; d >= 0; d--){
+                pow = 1 << d;
+                pow1 = 1 << (d + 1);
+                blocksPerGrid = (ceilN / pow1 + BLOCK_SIZE - 1) / BLOCK_SIZE;
+                kernDownSweep<<< blocksPerGrid, BLOCK_SIZE >>>(ceilN, pow, pow1, dev_data);
+                checkCUDAError("kernDownSweep failed");
+            }
             timer().endGpuTimer();
+
+            cudaMemcpy(odata, dev_data, n * sizeof(int), cudaMemcpyDeviceToHost);
+            checkCUDAError("memcpy answer to host failed");
+
+            cudaFree(dev_data);
         }
 
         /**
@@ -31,10 +107,48 @@ namespace StreamCompaction {
          * @returns The number of elements remaining after compaction.
          */
         int compact(int n, int *odata, const int *idata) {
+            int * dev_bools;
+            int * dev_indices;
+            int * dev_scatter;
+            int * dev_input;
+            cudaMalloc((void**) &dev_bools, n * sizeof(int));
+            cudaMalloc((void**) &dev_indices, n * sizeof(int));
+            cudaMalloc((void**) &dev_scatter, n * sizeof(int));
+            cudaMalloc((void**) &dev_input, n * sizeof(int));
+            cudaMemcpy(dev_input, idata, n * sizeof(int), cudaMemcpyHostToDevice);
+
+            int host_indices[n]; // NOTE(review): VLAs are a compiler extension; fine here, std::vector would be portable
+            int host_bools[n];
+
+            int blocksPerGrid = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
+
+            // 1. Create boolean array
+            Common::kernMapToBoolean<<< blocksPerGrid, BLOCK_SIZE >>>(n, dev_bools, dev_input);
+            cudaMemcpy(host_bools, dev_bools, n * sizeof(int), cudaMemcpyDeviceToHost);
+            // 2. Scan to generate indices
+            scan(n, host_indices, host_bools);
+            cudaMemcpy(dev_indices, host_indices, n * sizeof(int), cudaMemcpyHostToDevice);
+            // 3. Scatter
             timer().startGpuTimer();
-            // TODO
+            Common::kernScatter<<< blocksPerGrid, BLOCK_SIZE >>>(n, dev_scatter, dev_input, dev_bools, dev_indices);
             timer().endGpuTimer();
-            return -1;
+
+            // Copy to output
+            cudaMemcpy(odata, dev_scatter, n * sizeof(int), cudaMemcpyDeviceToHost);
+
+            // Memory cleanup
+            cudaFree(dev_bools);
+            cudaFree(dev_indices);
+            cudaFree(dev_scatter);
+            cudaFree(dev_input);
+
+            // Beware!
Since exclusive scan, we won't count last element in
+            // indices, let's fix that
+            if (host_bools[n - 1] != 0) {
+                return host_indices[n - 1] + 1;
+            } else {
+                return host_indices[n-1];
+            }
         }
     }
 }
diff --git a/stream_compaction/naive.cu b/stream_compaction/naive.cu
index 9218f8e..dc2bbe5 100644
--- a/stream_compaction/naive.cu
+++ b/stream_compaction/naive.cu
@@ -5,21 +5,68 @@ namespace StreamCompaction {
     namespace Naive {
+        int* naive_idata;
+        int* naive_odata;
+
         using StreamCompaction::Common::PerformanceTimer;
         PerformanceTimer& timer()
         {
             static PerformanceTimer timer;
             return timer;
         }
 
-        // TODO: __global__
+        // __global__
+        __global__ void kernNaiveScan(int n, int pow, int * odata, int * idata) {
+            int idx = threadIdx.x + (blockIdx.x * blockDim.x);
+            if (idx >= n) {
+                return;
+            }
+
+            if (idx >= pow) {
+                odata[idx] = idata[idx - pow] + idata[idx];
+            } else {
+                odata[idx] = idata[idx];
+            }
+
+        }
 
         /**
          * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        void scan(int n, int *odata, const int *idata) {
+            cudaMalloc((void**) &naive_idata, n * sizeof(int));
+            checkCUDAError("Failed to allocate idata");
+            cudaMalloc((void**) &naive_odata, n * sizeof(int));
+            checkCUDAError("Failed to allocate odata");
+
+            //Transfer from host memory to device
+            cudaMemcpy(naive_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
+            checkCUDAError("cudaMemcpy failed (initial)");
+
+            int blocksPerGrid = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
+
             timer().startGpuTimer();
-            // TODO
+            int logValue = ilog2ceil(n);
+            int pow;
+            for (int d = 1; d <= logValue; d++){
+                pow = 1 << (d - 1); // parenthesized: `-` binds tighter than `<<`
+                kernNaiveScan<<< blocksPerGrid, BLOCK_SIZE >>>(n, pow, naive_odata, naive_idata);
+                checkCUDAError("kernNaiveScan failed");
+
+                std::swap(naive_odata, naive_idata);
+            }
+            //std::swap(naive_odata, naive_idata);
+
+            Common::kernConvertScanToExclusive<<< blocksPerGrid, BLOCK_SIZE >>>(n, naive_odata, naive_idata);
+            checkCUDAError("kernConvertScanToExclusive failed");
             timer().endGpuTimer();
+
+
cudaMemcpy(odata, naive_odata, n * sizeof(int), cudaMemcpyDeviceToHost);
+            checkCUDAError("cudaMemcpy failed (final)");
+
+            cudaFree(naive_idata);
+            checkCUDAError("cudaFree idata failed");
+            cudaFree(naive_odata);
+            checkCUDAError("cudaFree odata failed");
         }
     }
 }
diff --git a/stream_compaction/thrust.cu b/stream_compaction/thrust.cu
index 36b732d..94bedb8 100644
--- a/stream_compaction/thrust.cu
+++ b/stream_compaction/thrust.cu
@@ -22,6 +22,14 @@
             // TODO use `thrust::exclusive_scan`
             // example: for device_vectors dv_in and dv_out:
             // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
+            // Use device_vectors (as the example above suggests) so thrust
+            // dispatches the CUDA backend; with raw host pointers,
+            // thrust::exclusive_scan runs the sequential host path instead.
+            thrust::device_vector<int> dv_in(idata, idata + n);
+            thrust::device_vector<int> dv_out(n);
+            thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
-            timer().endGpuTimer();
+            timer().endGpuTimer();
+            // Copy the result back to the caller's buffer (untimed).
+            thrust::copy(dv_out.begin(), dv_out.end(), odata);
         }
     }