diff --git a/README.md b/README.md
index d63a6a1..50c7903 100644
--- a/README.md
+++ b/README.md
@@ -1,11 +1,31 @@
**University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 1 - Flocking**
-* (TODO) YOUR NAME HERE
-  * (TODO) [LinkedIn](), [personal website](), [twitter](), etc.
-* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab)
+* Yuru Wang
+* Tested on: Windows 10, i7-7700HQ @ 2.5GHz 128GB, GTX 1050 Ti 8GB (personal computer)
+* Modified CMakeLists.txt: changed sm_20 to sm_61 inside cuda_add_library

-### (TODO: Your README)
+### Result
+**Naive Implementation**
+![](images/gif1.gif)
+**Uniform Grid Implementation**
+![](images/1.gif)

-Include screenshots, analysis, etc. (Remember, this is public, so don't put
-anything here that you don't want to share with the world.)
+### Performance Testing
+![](images/fps_boids_visual.jpg)
+![](images/fps_boids_noVisual.jpg)
+![](images/fps_blockSize.jpg)
+![](images/fps_cells.jpg)
+
+### Questions
+#### For each implementation, how does changing the number of boids affect performance? Why do you think this is?
+As the diagrams indicate, increasing the number of boids lowers the FPS for all three implementations. As more boids are added to the scene, each boid gains more neighbors, so computing its new velocity requires more time.
+
+#### For each implementation, how does changing the block count and block size affect performance? Why do you think this is?
+From the chart we can see that, for the naive implementation, performance improves as the block size increases. For the uniform grid implementations, however, the FPS rises with block size at first, reaches a peak, and then starts to drop. So there appears to be an optimal block size for a given GPU hardware configuration. If the block size is too small, a block may contain fewer threads than a single warp; since an SM schedules one warp of a block at a time, such tiny blocks waste the SM's processing power. Meanwhile, if the block size is too large, each block holds many warps that compete for the SM's registers and scheduling slots, limiting how much work can be resident at once.
+
+#### For the coherent uniform grid: did you experience any performance improvements with the more coherent uniform grid? Was this the outcome you expected? Why or why not?
+The coherent uniform grid performs better than the scattered one, as observed from the diagram. This is the expected outcome. In the scattered implementation, the indices pointing to the boid position and velocity data are sorted by grid cell, but those indices still refer to discontiguous memory locations. In the coherent implementation, the position and velocity data themselves are reordered, so boids in the same cell reside in contiguous memory, which speeds up memory access.
+
+#### Did changing cell width and checking 27 vs 8 neighboring cells affect performance? Why or why not? Be careful: it is insufficient (and possibly incorrect) to say that 27-cell is slower simply because there are more cells to check!
+Changing the cell width did affect performance. Setting the cell width to twice the neighborhood distance means we only need to check at most 8 neighboring cells, while setting the cell width equal to the neighborhood distance requires checking all 27 surrounding cells to cover every boid that could exert influence. Even though looping over 27 cells costs more iterations than looping over 8, the volume those cells cover differs as well: 27 cells of width d span 27d^3, whereas 8 cells of width 2d span 64d^3 (see the worked comparison below). When the number of boids is large, checking 27 cells therefore examines fewer candidate boids than checking 8 cells, which gives better performance.
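+As a quick sanity check on the volume argument (illustrative numbers only, not taken from the measurements above), the two search volumes can be compared directly; `d` stands for the maximum rule distance:

```cpp
#include <cstdio>

// Compare the volume searched by 8 cells of width 2d against 27 cells of
// width d. Assuming roughly uniform boid density, the number of candidate
// boids scanned scales with these volumes.
int main() {
    const float d = 5.0f;                                            // example neighborhood distance
    const float vol8  = 8.0f * (2.0f * d) * (2.0f * d) * (2.0f * d); // 64 * d^3
    const float vol27 = 27.0f * d * d * d;                           // 27 * d^3
    printf("8-cell search volume:  %.0f\n", vol8);   // 8000
    printf("27-cell search volume: %.0f\n", vol27);  // 3375
    return 0;
}
```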
diff --git a/analysis.docx b/analysis.docx
new file mode 100644
index 0000000..2d6c5ac
Binary files /dev/null and b/analysis.docx differ
diff --git a/data.xlsx b/data.xlsx
new file mode 100644
index 0000000..bc5b281
Binary files /dev/null and b/data.xlsx differ
diff --git a/images/1.gif b/images/1.gif
new file mode 100644
index 0000000..5798b43
Binary files /dev/null and b/images/1.gif differ
diff --git a/images/fps_blockSize.jpg b/images/fps_blockSize.jpg
new file mode 100644
index 0000000..0738aea
Binary files /dev/null and b/images/fps_blockSize.jpg differ
diff --git a/images/fps_boids_noVisual.jpg b/images/fps_boids_noVisual.jpg
new file mode 100644
index 0000000..b1b42cb
Binary files /dev/null and b/images/fps_boids_noVisual.jpg differ
diff --git a/images/fps_boids_visual.jpg b/images/fps_boids_visual.jpg
new file mode 100644
index 0000000..a8d4d7c
Binary files /dev/null and b/images/fps_boids_visual.jpg differ
diff --git a/images/fps_cells.jpg b/images/fps_cells.jpg
new file mode 100644
index 0000000..3d41c02
Binary files /dev/null and b/images/fps_cells.jpg differ
diff --git a/images/gif1.gif b/images/gif1.gif
new file mode 100644
index 0000000..99d3255
Binary files /dev/null and b/images/gif1.gif differ
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index fdd636d..b737097 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -10,5 +10,5 @@ set(SOURCE_FILES
cuda_add_library(src
${SOURCE_FILES}
-    OPTIONS -arch=sm_20
+    OPTIONS -arch=sm_61
)
diff --git a/src/kernel.cu b/src/kernel.cu
index 74dffcb..943ab5c 100644
--- a/src/kernel.cu
+++ b/src/kernel.cu
@@ -21,14 +21,14 @@
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
- cudaError_t err = cudaGetLastError();
- if (cudaSuccess != err) {
- if (line >= 0) {
- fprintf(stderr, "Line %d: ", line);
- }
- fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
- exit(EXIT_FAILURE);
- }
+ cudaError_t err = cudaGetLastError();
+ if (cudaSuccess != err) {
+ if (line >= 0) {
+ fprintf(stderr, "Line %d: ", line);
+ }
+ fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
+ exit(EXIT_FAILURE);
+ }
}
@@ -76,15 +76,16 @@ glm::vec3 *dev_vel2;
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
-// needed for use with thrust
+ // needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices;   // to this cell?
-// TODO-2.3 - consider what additional buffers you might need to reshuffle
-// the position and velocity data to be coherent within cells.
+ // TODO-2.3 - consider what additional buffers you might need to reshuffle
+ // the position and velocity data to be coherent within cells.
+glm::vec3 *dev_pos2;

// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
@@ -99,13 +100,13 @@ glm::vec3 gridMinimum;
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
- a = (a + 0x7ed55d16) + (a << 12);
- a = (a ^ 0xc761c23c) ^ (a >> 19);
- a = (a + 0x165667b1) + (a << 5);
- a = (a + 0xd3a2646c) ^ (a << 9);
- a = (a + 0xfd7046c5) + (a << 3);
- a = (a ^ 0xb55a4f09) ^ (a >> 16);
- return a;
+ a = (a + 0x7ed55d16) + (a << 12);
+ a = (a ^ 0xc761c23c) ^ (a >> 19);
+ a = (a + 0x165667b1) + (a << 5);
+ a = (a + 0xd3a2646c) ^ (a << 9);
+ a = (a + 0xfd7046c5) + (a << 3);
+ a = (a ^ 0xb55a4f09) ^ (a >> 16);
+ return a;
}

/**
@@ -113,10 +114,10 @@ __host__ __device__ unsigned int hash(unsigned int a) {
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
- thrust::default_random_engine rng(hash((int)(index * time)));
- thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
+ thrust::default_random_engine rng(hash((int)(index * time)));
+ thrust::uniform_real_distribution<float> unitDistrib(-1, 1);

- return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
+ return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}

/**
@@ -124,52 +125,68 @@ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
- int index = (blockIdx.x * blockDim.x) + threadIdx.x;
- if (index < N) {
- glm::vec3 rand = generateRandomVec3(time, index);
- arr[index].x = scale * rand.x;
- arr[index].y = scale * rand.y;
- arr[index].z = scale * rand.z;
- }
+ int index = (blockIdx.x * blockDim.x) + threadIdx.x;
+ if (index < N) {
+ glm::vec3 rand = generateRandomVec3(time, index);
+ arr[index].x = scale * rand.x;
+ arr[index].y = scale * rand.y;
+ arr[index].z = scale * rand.z;
+ }
}

/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
- numObjects = N;
- dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
-
- // LOOK-1.2 - This is basic CUDA memory management and error checking.
- // Don't forget to cudaFree in Boids::endSimulation.
- cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
- checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
-
- cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
- checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
-
- cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
- checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
-
- // LOOK-1.2 - This is a typical CUDA kernel invocation.
- kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
- dev_pos, scene_scale);
- checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
-
- // LOOK-2.1 computing grid params
- gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
- int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
- gridSideCount = 2 * halfSideCount;
-
- gridCellCount = gridSideCount * gridSideCount * gridSideCount;
- gridInverseCellWidth = 1.0f / gridCellWidth;
- float halfGridWidth = gridCellWidth * halfSideCount;
- gridMinimum.x -= halfGridWidth;
- gridMinimum.y -= halfGridWidth;
- gridMinimum.z -= halfGridWidth;
-
- // TODO-2.1 TODO-2.3 - Allocate additional buffers here.
- cudaDeviceSynchronize();
+ numObjects = N;
+ dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
+
+ // LOOK-1.2 - This is basic CUDA memory management and error checking.
+ // Don't forget to cudaFree in Boids::endSimulation.
+ cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
+ checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
+
+ cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
+ checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
+
+ cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
+ checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
+
+ // LOOK-1.2 - This is a typical CUDA kernel invocation.
+ kernGenerateRandomPosArray << <fullBlocksPerGrid, blockSize >> >(1, numObjects,
+ dev_pos, scene_scale);
+ checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
+
+ // LOOK-2.1 computing grid params
+ gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
+ int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
+ gridSideCount = 2 * halfSideCount;
+
+ gridCellCount = gridSideCount * gridSideCount * gridSideCount;
+ gridInverseCellWidth = 1.0f / gridCellWidth;
+ float halfGridWidth = gridCellWidth * halfSideCount;
+ gridMinimum.x -= halfGridWidth;
+ gridMinimum.y -= halfGridWidth;
+ gridMinimum.z -= halfGridWidth;
+
+ // TODO-2.1 TODO-2.3 - Allocate additional buffers here.
+
+ cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
+ checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
+
+ cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
+ checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
+
+ cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
+ checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
+
+ cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
+ checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
+
+ cudaMalloc((void**)&dev_pos2, N * sizeof(glm::vec3));
+ checkCUDAErrorWithLine("cudaMalloc dev_pos2 failed!");
+
+ cudaDeviceSynchronize();
}
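// A note on the launch configuration used throughout this file:
// (N + blockSize - 1) / blockSize is integer ceiling division, so enough
// blocks are launched to give every boid a thread even when N is not a
// multiple of the block size. A minimal sketch with illustrative values:
//
//   const int blockSize = 128;
//   const int N = 5000;
//   const int blocks = (N + blockSize - 1) / blockSize; // 40 blocks -> 5120 threads
//
// The per-thread guard `if (index >= N) return;` in each kernel discards
// the 120 surplus threads.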
@@ -181,41 +198,41 @@ void Boids::initSimulation(int N) {
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
- int index = threadIdx.x + (blockIdx.x * blockDim.x);
+ int index = threadIdx.x + (blockIdx.x * blockDim.x);

- float c_scale = -1.0f / s_scale;
+ float c_scale = -1.0f / s_scale;

- if (index < N) {
- vbo[4 * index + 0] = pos[index].x * c_scale;
- vbo[4 * index + 1] = pos[index].y * c_scale;
- vbo[4 * index + 2] = pos[index].z * c_scale;
- vbo[4 * index + 3] = 1.0f;
- }
+ if (index < N) {
+ vbo[4 * index + 0] = pos[index].x * c_scale;
+ vbo[4 * index + 1] = pos[index].y * c_scale;
+ vbo[4 * index + 2] = pos[index].z * c_scale;
+ vbo[4 * index + 3] = 1.0f;
+ }
}

__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
- int index = threadIdx.x + (blockIdx.x * blockDim.x);
-
- if (index < N) {
- vbo[4 * index + 0] = vel[index].x + 0.3f;
- vbo[4 * index + 1] = vel[index].y + 0.3f;
- vbo[4 * index + 2] = vel[index].z + 0.3f;
- vbo[4 * index + 3] = 1.0f;
- }
+ int index = threadIdx.x + (blockIdx.x * blockDim.x);
+
+ if (index < N) {
+ vbo[4 * index + 0] = vel[index].x + 0.3f;
+ vbo[4 * index + 1] = vel[index].y + 0.3f;
+ vbo[4 * index + 2] = vel[index].z + 0.3f;
+ vbo[4 * index + 3] = 1.0f;
+ }
}

/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
- dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
+ dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);

- kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
- kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
+ kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
+ kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);

- checkCUDAErrorWithLine("copyBoidsToVBO failed!");
+ checkCUDAErrorWithLine("copyBoidsToVBO failed!");

- cudaDeviceSynchronize();
+ cudaDeviceSynchronize();
}

@@ -223,6 +240,72 @@ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities)
* stepSimulation *
******************/

+__device__ glm::vec3 computeVelRule1(int N, const int iSelf, const glm::vec3 *pos) {
+ glm::vec3 thisPos = pos[iSelf];
+ // Perceived center of mass of nearby boids; the boid itself is excluded.
+ glm::vec3 perceived_center(0.0f);
+
+ int count = 0;
+ for (int i = 0; i < N; i++) {
+ if (i == iSelf) {
+ continue;
+ }
+ glm::vec3 pos_b = pos[i];
+ float dist = glm::length(thisPos - pos_b);
+ if (dist < rule1Distance) {
+ perceived_center += pos_b;
+ count++;
+ }
+ }
+ if (count == 0) {
+ return glm::vec3(0.0f);
+ }
+ perceived_center /= count;
+ glm::vec3 vel1 = (perceived_center - thisPos) * rule1Scale;
+ return vel1;
+}
+
+__device__ glm::vec3 computeVelRule2(int N, const int iSelf, const glm::vec3 *pos) {
+ glm::vec3 c(0.0f);
+ glm::vec3 thisPos = pos[iSelf];
+
+ for (int i = 0; i < N; i++) {
+ if (i == iSelf) {
+ continue;
+ }
+ glm::vec3 pos_b = pos[i];
+ float dist = glm::length(thisPos - pos_b);
+ if (dist < rule2Distance) {
+ c -= (pos_b - thisPos);
+ }
+ }
+ glm::vec3 vel2 = c * rule2Scale;
+ return vel2;
+}
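// A worked instance of the separation rule above (illustrative coordinates):
//   thisPos = (0,0,0), one neighbor at pos_b = (1,0,0) within rule2Distance:
//     c -= (pos_b - thisPos)  =>  c = (-1, 0, 0)
//   The contribution points directly away from the neighbor, and rule2Scale
//   damps it into a gentle push rather than a teleport.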
+__device__ glm::vec3 computeVelRule3(int N, const int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
+ glm::vec3 thisPos = pos[iSelf];
+ // Perceived average velocity of nearby boids; the boid itself is excluded.
+ glm::vec3 perceived_vel(0.0f);
+
+ int count = 0;
+ for (int i = 0; i < N; i++) {
+ if (i == iSelf) {
+ continue;
+ }
+ glm::vec3 pos_b = pos[i];
+ glm::vec3 vel_b = vel[i];
+ float dist = glm::length(thisPos - pos_b);
+ if (dist < rule3Distance) {
+ perceived_vel += vel_b;
+ count++;
+ }
+ }
+ if (count == 0) {
+ return glm::vec3(0.0f);
+ }
+ perceived_vel /= count;
+
+ glm::vec3 vel3 = perceived_vel * rule3Scale;
+ return vel3;
+}
+
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
@@ -230,10 +313,22 @@ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities)
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
- // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
- // Rule 2: boids try to stay a distance d away from each other
- // Rule 3: boids try to match the speed of surrounding boids
- return glm::vec3(0.0f, 0.0f, 0.0f);
+ // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
+ glm::vec3 vel1 = computeVelRule1(N, iSelf, pos);
+
+ // Rule 2: boids try to stay a distance d away from each other
+ glm::vec3 vel2 = computeVelRule2(N, iSelf, pos);
+
+ // Rule 3: boids try to match the speed of surrounding boids
+ glm::vec3 vel3 = computeVelRule3(N, iSelf, pos, vel);
+
+ return vel1 + vel2 + vel3;
}

/**
@@ -241,10 +336,23 @@ __device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *po
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
- glm::vec3 *vel1, glm::vec3 *vel2) {
- // Compute a new velocity based on pos and vel1
- // Clamp the speed
- // Record the new velocity into vel2. Question: why NOT vel1?
+ glm::vec3 *vel1, glm::vec3 *vel2) {
+ int index = threadIdx.x + (blockIdx.x * blockDim.x);
+ if (index >= N) {
+ return;
+ }
+
+ // Compute a new velocity based on pos and vel1
+ glm::vec3 deltaV = computeVelocityChange(N, index, pos, vel1);
+ glm::vec3 newVel = vel1[index] + deltaV;
+
+ // Clamp the speed. Note glm::length(newVel), not newVel.length():
+ // vec3::length() returns the component count (3), not the magnitude.
+ if (glm::length(newVel) > maxSpeed) {
+ newVel = glm::normalize(newVel) * maxSpeed;
+ }
+
+ // Record the new velocity into vel2. Question: why NOT vel1?
+ vel2[index] = newVel;
}

/**
@@ -252,24 +360,24 @@ __global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
- // Update position by velocity
- int index = threadIdx.x + (blockIdx.x * blockDim.x);
- if (index >= N) {
- return;
- }
- glm::vec3 thisPos = pos[index];
- thisPos += vel[index] * dt;
-
- // Wrap the boids around so we don't lose them
- thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
- thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
- thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
-
- thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
- thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
- thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
-
- pos[index] = thisPos;
+ // Update position by velocity
+ int index = threadIdx.x + (blockIdx.x * blockDim.x);
+ if (index >= N) {
+ return;
+ }
+ glm::vec3 thisPos = pos[index];
+ thisPos += vel[index] * dt;
+
+ // Wrap the boids around so we don't lose them
+ thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
+ thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
+ thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
+
+ thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
+ thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
+ thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
+
+ pos[index] = thisPos;
}
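// Why vel2 and NOT vel1, as the comment above asks: every thread reads its
// neighbors' entries of vel1 while the kernel is still running, so writing
// results back into vel1 would let some threads observe half-updated
// velocities. Writing into vel2 and rotating buffers afterwards keeps each
// step a pure function of the previous state. Host-side sketch, using this
// file's buffer names:
//
//   kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, dev_vel1, dev_vel2);
//   std::swap(dev_vel1, dev_vel2); // O(1) pointer rotation; no device copy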
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
@@ -279,179 +387,419 @@ __global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
- return x + y * gridResolution + z * gridResolution * gridResolution;
+ // Return -1 for out-of-range cells so callers can skip them.
+ if (x < 0 || x >= gridResolution || y < 0 || y >= gridResolution || z < 0 || z >= gridResolution)
+ {
+ return -1;
+ }
+ else
+ {
+ return x + y * gridResolution + z * gridResolution * gridResolution;
+ }
}

__global__ void kernComputeIndices(int N, int gridResolution,
- glm::vec3 gridMin, float inverseCellWidth,
- glm::vec3 *pos, int *indices, int *gridIndices) {
- // TODO-2.1
- // - Label each boid with the index of its grid cell.
- // - Set up a parallel array of integer indices as pointers to the actual
- // boid data in pos and vel1/vel2
+ glm::vec3 gridMin, float inverseCellWidth,
+ glm::vec3 *pos, int *indices, int *gridIndices) {
+ // TODO-2.1
+ // - Label each boid with the index of its grid cell.
+ // - Set up a parallel array of integer indices as pointers to the actual
+ // boid data in pos and vel1/vel2
+ int index = threadIdx.x + (blockIdx.x * blockDim.x);
+ if (index >= N) {
+ return;
+ }
+
+ glm::vec3 thisPos = pos[index];
+
+ glm::ivec3 cellIdx3D;
+ cellIdx3D.x = (int)((thisPos.x - gridMin.x) * inverseCellWidth);
+ cellIdx3D.y = (int)((thisPos.y - gridMin.y) * inverseCellWidth);
+ cellIdx3D.z = (int)((thisPos.z - gridMin.z) * inverseCellWidth);
+
+ int gridIndex = gridIndex3Dto1D(cellIdx3D.x, cellIdx3D.y, cellIdx3D.z, gridResolution);
+
+ gridIndices[index] = gridIndex;
+ indices[index] = index;
}

// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
- int index = (blockIdx.x * blockDim.x) + threadIdx.x;
- if (index < N) {
- intBuffer[index] = value;
- }
+ int index = (blockIdx.x * blockDim.x) + threadIdx.x;
+ if (index < N) {
+ intBuffer[index] = value;
+ }
}
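// For concreteness, a worked instance of the row-major mapping above
// (a hypothetical host-side mirror, written here for illustration only):
//
//   int gridIndex3Dto1D_host(int x, int y, int z, int res) {
//     return x + y * res + z * res * res;
//   }
//   // gridResolution = 10: cell (2, 3, 4) flattens to 2 + 30 + 400 = 432.
//   // x varies fastest: the x-neighbor (3, 3, 4) lands in the next slot, 433.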
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
- int *gridCellStartIndices, int *gridCellEndIndices) {
- // TODO-2.1
- // Identify the start point of each cell in the gridIndices array.
- // This is basically a parallel unrolling of a loop that goes
- // "this index doesn't match the one before it, must be a new cell!"
+ int *gridCellStartIndices, int *gridCellEndIndices) {
+ // TODO-2.1
+ // Identify the start point of each cell in the gridIndices array.
+ // This is basically a parallel unrolling of a loop that goes
+ // "this index doesn't match the one before it, must be a new cell!"
+
+ int index = threadIdx.x + (blockIdx.x * blockDim.x);
+ if (index >= N) {
+ return;
+ }
+ if (index == 0) {
+ gridCellStartIndices[particleGridIndices[index]] = index;
+ return;
+ }
+ if (index == N - 1) {
+ // The last sorted boid closes the last occupied cell.
+ gridCellEndIndices[particleGridIndices[index]] = index;
+ }
+ if (particleGridIndices[index] != particleGridIndices[index - 1]) {
+ int firstCellIndex = particleGridIndices[index - 1];
+ int secondCellIndex = particleGridIndices[index];
+ gridCellEndIndices[firstCellIndex] = index - 1; // inclusive end
+ gridCellStartIndices[secondCellIndex] = index;
+ }
+}
+
+/**
+* use the rearranged array index buffer to reshuffle all
+* the particle data in the simulation array.
+*/
+__global__ void kernShuffle(int N, int *particleArrayIndices, glm::vec3 *source, glm::vec3 *dest) {
+ int index = threadIdx.x + (blockIdx.x * blockDim.x);
+ if (index >= N) {
+ return;
+ }
+ dest[index] = source[particleArrayIndices[index]];
+}
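// A worked example of the start/end bookkeeping under the inclusive-end
// convention used above (values invented for illustration):
//
//   sorted particleGridIndices: [2, 2, 2, 5, 5, 9]      (N = 6 boids)
//     cell 2: gridCellStartIndices[2] = 0, gridCellEndIndices[2] = 2
//     cell 5: gridCellStartIndices[5] = 3, gridCellEndIndices[5] = 4
//     cell 9: gridCellStartIndices[9] = 5, gridCellEndIndices[9] = 5
//   Every other cell keeps the -1 it was reset to, marking it empty, and
//   the neighbor-search loops below skip it.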
__global__ void kernUpdateVelNeighborSearchScattered(
- int N, int gridResolution, glm::vec3 gridMin,
- float inverseCellWidth, float cellWidth,
- int *gridCellStartIndices, int *gridCellEndIndices,
- int *particleArrayIndices,
- glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
- // TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
- // the number of boids that need to be checked.
- // - Identify the grid cell that this particle is in
- // - Identify which cells may contain neighbors. This isn't always 8.
- // - For each cell, read the start/end indices in the boid pointer array.
- // - Access each boid in the cell and compute velocity change from
- // the boids rules, if this boid is within the neighborhood distance.
- // - Clamp the speed change before putting the new speed in vel2
+ int N, int gridResolution, glm::vec3 gridMin,
+ float inverseCellWidth, float cellWidth,
+ int *gridCellStartIndices, int *gridCellEndIndices,
+ int *particleArrayIndices,
+ glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
+ // TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
+ // the number of boids that need to be checked.
+ int index = threadIdx.x + (blockIdx.x * blockDim.x);
+ if (index >= N)
+ {
+ return;
+ }
+ // - Identify the grid cell that this particle is in
+ int thisArrayIdx = particleArrayIndices[index];
+ glm::vec3 thisPos = pos[thisArrayIdx];
+ glm::vec3 gridPos = (thisPos - gridMin) * inverseCellWidth; // fractional cell coordinates
+ glm::ivec3 cellIdx3D = glm::ivec3(glm::floor(gridPos)); // integer cell; must be floored, not a float copy
+ // - Identify which cells may contain neighbors. This isn't always 8.
+
+ int neighborCells[8];
+
+ // The fractional part says which half of the cell the boid sits in, and
+ // therefore on which side the other 7 candidate cells lie.
+ int offsetX = (gridPos.x - cellIdx3D.x) >= 0.5f ? 1 : -1;
+ int offsetY = (gridPos.y - cellIdx3D.y) >= 0.5f ? 1 : -1;
+ int offsetZ = (gridPos.z - cellIdx3D.z) >= 0.5f ? 1 : -1;
+ int count = 0;
+ for (int i = 0; i < 2; i++) {
+ for (int j = 0; j < 2; j++) {
+ for (int k = 0; k < 2; k++) {
+ neighborCells[count] = gridIndex3Dto1D(cellIdx3D.x + i * offsetX, cellIdx3D.y + j * offsetY, cellIdx3D.z + k * offsetZ, gridResolution);
+ count++;
+ }
+ }
+ }
+ glm::vec3 rule1Vel(0.0f), rule2Vel(0.0f), rule3Vel(0.0f);
+ glm::vec3 perceived_center(0.0f), c(0.0f), perceived_velocity(0.0f);
+ int rule1Count = 0;
+ int rule3Count = 0;
+ // - For each cell, read the start/end indices in the boid pointer array.
+ // - Access each boid in the cell and compute velocity change from
+ // the boids rules, if this boid is within the neighborhood distance.
+
+ for (int i = 0; i < 8; i++) {
+ if (neighborCells[i] == -1) {
+ continue;
+ }
+ int start = gridCellStartIndices[neighborCells[i]];
+ int end = gridCellEndIndices[neighborCells[i]];
+ if (start < 0) {
+ continue; // cell holds no boids
+ }
+ for (int j = start; j <= end; j++) { // end index is inclusive
+ int k = particleArrayIndices[j];
+ if (k != thisArrayIdx) {
+ float distance = glm::length(pos[k] - pos[thisArrayIdx]);
+ if (distance < rule1Distance) {
+ perceived_center += pos[k];
+ rule1Count++;
+ }
+ if (distance < rule2Distance)
+ {
+ c -= (pos[k] - pos[thisArrayIdx]);
+ }
+ if (distance < rule3Distance)
+ {
+ perceived_velocity += vel1[k];
+ rule3Count++;
+ }
+ }
+ }
+ }
+ if (rule1Count > 0)
+ {
+ perceived_center /= float(rule1Count);
+ rule1Vel = (perceived_center - pos[thisArrayIdx]) * rule1Scale;
+ }
+ rule2Vel = c * rule2Scale;
+ if (rule3Count > 0)
+ {
+ perceived_velocity /= float(rule3Count);
+ rule3Vel = perceived_velocity * rule3Scale;
+ }
+
+ glm::vec3 resultVel = rule1Vel + rule2Vel + rule3Vel;
+ glm::vec3 newVel = vel1[thisArrayIdx] + resultVel;
+ // - Clamp the speed change before putting the new speed in vel2
+ if (glm::length(newVel) > maxSpeed)
+ {
+ newVel = glm::normalize(newVel) * maxSpeed;
+ }
+ vel2[thisArrayIdx] = newVel;
}

__global__ void kernUpdateVelNeighborSearchCoherent(
- int N, int gridResolution, glm::vec3 gridMin,
- float inverseCellWidth, float cellWidth,
- int *gridCellStartIndices, int *gridCellEndIndices,
- glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
- // TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
- // except with one less level of indirection.
- // This should expect gridCellStartIndices and gridCellEndIndices to refer
- // directly to pos and vel1.
- // - Identify the grid cell that this particle is in
- // - Identify which cells may contain neighbors. This isn't always 8.
- // - For each cell, read the start/end indices in the boid pointer array.
- // DIFFERENCE: For best results, consider what order the cells should be
- // checked in to maximize the memory benefits of reordering the boids data.
- // - Access each boid in the cell and compute velocity change from
- // the boids rules, if this boid is within the neighborhood distance.
- // - Clamp the speed change before putting the new speed in vel2
+ int N, int gridResolution, glm::vec3 gridMin,
+ float inverseCellWidth, float cellWidth,
+ int *gridCellStartIndices, int *gridCellEndIndices,
+ glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
+ // TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
+ // except with one less level of indirection.
+ // This should expect gridCellStartIndices and gridCellEndIndices to refer
+ // directly to pos and vel1.
+ int index = threadIdx.x + (blockIdx.x * blockDim.x);
+ if (index >= N)
+ {
+ return;
+ }
+ // - Identify the grid cell that this particle is in
+ glm::vec3 thisPos = pos[index];
+ glm::vec3 gridPos = (thisPos - gridMin) * inverseCellWidth;
+ glm::ivec3 cellIdx3D = glm::ivec3(glm::floor(gridPos));
+ // - Identify which cells may contain neighbors. This isn't always 8.
+ int neighborCells[8];
+
+ int offsetX = (gridPos.x - cellIdx3D.x) >= 0.5f ? 1 : -1;
+ int offsetY = (gridPos.y - cellIdx3D.y) >= 0.5f ? 1 : -1;
+ int offsetZ = (gridPos.z - cellIdx3D.z) >= 0.5f ? 1 : -1;
+ int count = 0;
+ for (int i = 0; i < 2; i++) {
+ for (int j = 0; j < 2; j++) {
+ for (int k = 0; k < 2; k++) {
+ neighborCells[count] = gridIndex3Dto1D(cellIdx3D.x + i * offsetX, cellIdx3D.y + j * offsetY, cellIdx3D.z + k * offsetZ, gridResolution);
+ count++;
+ }
+ }
+ }
+
+ glm::vec3 rule1Vel(0.0f), rule2Vel(0.0f), rule3Vel(0.0f);
+ glm::vec3 perceived_center(0.0f), c(0.0f), perceived_velocity(0.0f);
+ int rule1Count = 0;
+ int rule3Count = 0;
+ // - For each cell, read the start/end indices in the boid pointer array.
+ // DIFFERENCE: For best results, consider what order the cells should be
+ // checked in to maximize the memory benefits of reordering the boids data.
+ // - Access each boid in the cell and compute velocity change from
+ // the boids rules, if this boid is within the neighborhood distance.
+ for (int i = 0; i < 8; i++)
+ {
+ if (neighborCells[i] == -1)
+ {
+ continue;
+ }
+ int start = gridCellStartIndices[neighborCells[i]];
+ int end = gridCellEndIndices[neighborCells[i]];
+ if (start < 0)
+ {
+ continue; // cell holds no boids
+ }
+ for (int j = start; j <= end; j++) // end index is inclusive
+ {
+ if (j != index)
+ {
+ float distance = glm::length(pos[j] - pos[index]);
+ // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
+ if (distance < rule1Distance)
+ {
+ perceived_center += pos[j];
+ rule1Count++;
+ }
+ // Rule 2: boids try to stay a distance d away from each other
+ if (distance < rule2Distance)
+ {
+ c -= (pos[j] - pos[index]);
+ }
+ // Rule 3: boids try to match the speed of surrounding boids
+ if (distance < rule3Distance)
+ {
+ perceived_velocity += vel1[j];
+ rule3Count++;
+ }
+ }
+ }
+ }
+ if (rule1Count > 0)
+ {
+ perceived_center /= float(rule1Count);
+ rule1Vel = (perceived_center - pos[index]) * rule1Scale;
+ }
+ rule2Vel = c * rule2Scale;
+ if (rule3Count > 0)
+ {
+ perceived_velocity /= float(rule3Count);
+ rule3Vel = perceived_velocity * rule3Scale;
+ }
+
+ glm::vec3 resultVel = rule1Vel + rule2Vel + rule3Vel;
+ glm::vec3 newVel = vel1[index] + resultVel;
+ // - Clamp the speed change before putting the new speed in vel2
+ if (glm::length(newVel) > maxSpeed)
+ {
+ newVel = glm::normalize(newVel) * maxSpeed;
+ }
+ vel2[index] = newVel;
}

/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
- // TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
- // TODO-1.2 ping-pong the velocity buffers
+ // TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
+ dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
+ kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, dev_vel1, dev_vel2);
+ kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2);
+
+ // TODO-1.2 ping-pong the velocity buffers
+ cudaMemcpy(dev_vel1, dev_vel2, sizeof(glm::vec3) * numObjects, cudaMemcpyDeviceToDevice);
}

void Boids::stepSimulationScatteredGrid(float dt) {
- // TODO-2.1
- // Uniform Grid Neighbor search using Thrust sort.
- // In Parallel:
- // - label each particle with its array index as well as its grid index.
- // Use 2x width grids.
- // - Unstable key sort using Thrust. A stable sort isn't necessary, but you
- // are welcome to do a performance comparison.
- // - Naively unroll the loop for finding the start and end indices of each
- // cell's data pointers in the array of boid indices
- // - Perform velocity updates using neighbor search
- // - Update positions
- // - Ping-pong buffers as needed
+ // TODO-2.1
+ // Uniform Grid Neighbor search using Thrust sort.
+ // In Parallel:
+ // - label each particle with its array index as well as its grid index.
+ dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
+ kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
+ // Use 2x width grids.
+ // - Unstable key sort using Thrust. A stable sort isn't necessary, but you
+ // are welcome to do a performance comparison.
+ dev_thrust_particleArrayIndices = thrust::device_pointer_cast(dev_particleArrayIndices);
+ dev_thrust_particleGridIndices = thrust::device_pointer_cast(dev_particleGridIndices);
+ thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
+ // - Naively unroll the loop for finding the start and end indices of each
+ // cell's data pointers in the array of boid indices
+ dim3 fullBlocksPerGrid2((gridCellCount + blockSize - 1) / blockSize);
+
+ // Reset to -1 so cells that keep -1 are known to be empty.
+ kernResetIntBuffer << <fullBlocksPerGrid2, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
+ kernResetIntBuffer << <fullBlocksPerGrid2, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
+ kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
+ // - Perform velocity updates using neighbor search
+ kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
+ dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
+
+ // - Update positions
+ kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2);
+ // - Ping-pong buffers as needed
+ std::swap(dev_vel1, dev_vel2);
+
}
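// The Thrust sort above is the heart of the grid build: sorting the cell
// keys drags the boid indices into cell order. A minimal standalone
// illustration of thrust::sort_by_key on toy data (not the project buffers):
//
//   int keys[]   = { 9, 2, 5, 2 };  // grid cell of each boid
//   int values[] = { 0, 1, 2, 3 };  // boid indices
//   thrust::device_vector<int> k(keys, keys + 4);
//   thrust::device_vector<int> v(values, values + 4);
//   thrust::sort_by_key(k.begin(), k.end(), v.begin());
//   // Result: k = {2, 2, 5, 9}, v = {1, 3, 2, 0} -- values reordered by key.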
void Boids::stepSimulationCoherentGrid(float dt) {
- // TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
- // Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
- // In Parallel:
- // - Label each particle with its array index as well as its grid index.
- // Use 2x width grids
- // - Unstable key sort using Thrust. A stable sort isn't necessary, but you
- // are welcome to do a performance comparison.
- // - Naively unroll the loop for finding the start and end indices of each
- // cell's data pointers in the array of boid indices
- // - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
- // the particle data in the simulation array.
- // CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
- // - Perform velocity updates using neighbor search
- // - Update positions
- // - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
+ // TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
+ dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
+ // Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
+ // In Parallel:
+ // - Label each particle with its array index as well as its grid index.
+ // Use 2x width grids
+ kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
+ // - Unstable key sort using Thrust. A stable sort isn't necessary, but you
+ // are welcome to do a performance comparison.
+ dev_thrust_particleArrayIndices = thrust::device_pointer_cast(dev_particleArrayIndices);
+ dev_thrust_particleGridIndices = thrust::device_pointer_cast(dev_particleGridIndices);
+ thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
+ // - Naively unroll the loop for finding the start and end indices of each
+ // cell's data pointers in the array of boid indices
+ dim3 fullBlocksPerGrid2((gridCellCount + blockSize - 1) / blockSize);
+ // Reset to -1 first, exactly as in the scattered path, so stale entries
+ // from the previous frame cannot mark an empty cell as occupied.
+ kernResetIntBuffer << <fullBlocksPerGrid2, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
+ kernResetIntBuffer << <fullBlocksPerGrid2, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
+ kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
+ // - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
+ // the particle data in the simulation array.
+ // CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
+ kernShuffle << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_particleArrayIndices, dev_pos, dev_pos2);
+ kernShuffle << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_particleArrayIndices, dev_vel2, dev_vel1);
+ // - Perform velocity updates using neighbor search
+ kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_pos2, dev_vel1, dev_vel2);
+ // - Update positions
+ kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos2, dev_vel2);
+ // - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
+ std::swap(dev_pos, dev_pos2);
+
}

void Boids::endSimulation() {
- cudaFree(dev_vel1);
- cudaFree(dev_vel2);
- cudaFree(dev_pos);
-
- // TODO-2.1 TODO-2.3 - Free any additional buffers here.
+ cudaFree(dev_vel1);
+ cudaFree(dev_vel2);
+ cudaFree(dev_pos);
+
+ // TODO-2.1 TODO-2.3 - Free any additional buffers here.
+ cudaFree(dev_particleArrayIndices);
+ cudaFree(dev_particleGridIndices);
+ cudaFree(dev_gridCellStartIndices);
+ cudaFree(dev_gridCellEndIndices);
+ cudaFree(dev_pos2);
}

void Boids::unitTest() {
- // LOOK-1.2 Feel free to write additional tests here.
-
- // test unstable sort
- int *dev_intKeys;
- int *dev_intValues;
- int N = 10;
-
- std::unique_ptr<int[]> intKeys{ new int[N] };
- std::unique_ptr<int[]> intValues{ new int[N] };
-
- intKeys[0] = 0; intValues[0] = 0;
- intKeys[1] = 1; intValues[1] = 1;
- intKeys[2] = 0; intValues[2] = 2;
- intKeys[3] = 3; intValues[3] = 3;
- intKeys[4] = 0; intValues[4] = 4;
- intKeys[5] = 2; intValues[5] = 5;
- intKeys[6] = 2; intValues[6] = 6;
- intKeys[7] = 0; intValues[7] = 7;
- intKeys[8] = 5; intValues[8] = 8;
- intKeys[9] = 6; intValues[9] = 9;
-
- cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
- checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
-
- cudaMalloc((void**)&dev_intValues, N * sizeof(int));
- checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
-
- dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
-
- std::cout << "before unstable sort: " << std::endl;
- for (int i = 0; i < N; i++) {
- std::cout << " key: " << intKeys[i];
- std::cout << " value: " << intValues[i] << std::endl;
- }
-
- // How to copy data to the GPU
- cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
- cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
-
- // Wrap device vectors in thrust iterators for use with thrust.
- thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
- thrust::device_ptr<int> dev_thrust_values(dev_intValues);
- // LOOK-2.1 Example for using thrust::sort_by_key
- thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
-
- // How to copy data back to the CPU side from the GPU
- cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
- cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
- checkCUDAErrorWithLine("memcpy back failed!");
-
- std::cout << "after unstable sort: " << std::endl;
- for (int i = 0; i < N; i++) {
- std::cout << " key: " << intKeys[i];
- std::cout << " value: " << intValues[i] << std::endl;
- }
-
- // cleanup
- cudaFree(dev_intKeys);
- cudaFree(dev_intValues);
- checkCUDAErrorWithLine("cudaFree failed!");
- return;
+ // LOOK-1.2 Feel free to write additional tests here.
+
+ // test unstable sort
+ int *dev_intKeys;
+ int *dev_intValues;
+ int N = 10;
+
+ int *intKeys = new int[N];
+ int *intValues = new int[N];
+
+ intKeys[0] = 0; intValues[0] = 0;
+ intKeys[1] = 1; intValues[1] = 1;
+ intKeys[2] = 0; intValues[2] = 2;
+ intKeys[3] = 3; intValues[3] = 3;
+ intKeys[4] = 0; intValues[4] = 4;
+ intKeys[5] = 2; intValues[5] = 5;
+ intKeys[6] = 2; intValues[6] = 6;
+ intKeys[7] = 0; intValues[7] = 7;
+ intKeys[8] = 5; intValues[8] = 8;
+ intKeys[9] = 6; intValues[9] = 9;
+
+ cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
+ checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
+
+ cudaMalloc((void**)&dev_intValues, N * sizeof(int));
+ checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
+
+ dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
+
+ std::cout << "before unstable sort: " << std::endl;
+ for (int i = 0; i < N; i++) {
+ std::cout << " key: " << intKeys[i];
+ std::cout << " value: " << intValues[i] << std::endl;
+ }
+
+ // How to copy data to the GPU
+ cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice);
+ cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice);
+
+ // Wrap device vectors in thrust iterators for use with thrust.
+ thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
+ thrust::device_ptr<int> dev_thrust_values(dev_intValues);
+ // LOOK-2.1 Example for using thrust::sort_by_key
+ thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
+
+ // How to copy data back to the CPU side from the GPU
+ cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
+ cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
+ checkCUDAErrorWithLine("memcpy back failed!");
+
+ std::cout << "after unstable sort: " << std::endl;
+ for (int i = 0; i < N; i++) {
+ std::cout << " key: " << intKeys[i];
+ std::cout << " value: " << intValues[i] << std::endl;
+ }
+
+ // cleanup
+ delete[] intKeys;
+ delete[] intValues;
+ cudaFree(dev_intKeys);
+ cudaFree(dev_intValues);
+ checkCUDAErrorWithLine("cudaFree failed!");
+ return;
}
diff --git a/src/main.cpp b/src/main.cpp
index b82c8c6..ddd0e3b 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -14,8 +14,8 @@
// LOOK-2.1 LOOK-2.3 - toggles for UNIFORM_GRID and COHERENT_GRID
#define VISUALIZE 1
-#define UNIFORM_GRID 0
-#define COHERENT_GRID 0
+#define UNIFORM_GRID 1
+#define COHERENT_GRID 1

// LOOK-1.2 - change this to adjust particle count in the simulation
const int N_FOR_VIS = 5000;