{"type": "coding", "id": "ch02-vecadd-single-turn", "question": "Task: ch02-vecadd-single-turn\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n#include <cuda_runtime.h>\n\n__global__ void vecAddKernel(const float* A, const float* B, float* C, int n) {\n // TODO: Implement vector addition\n // Hints:\n // - Calculate global thread index i from blockIdx.x, blockDim.x, and threadIdx.x\n // - Add bounds check to ensure i < n\n // - Compute C[i] = A[i] + B[i]\n}\n```", "task_dir": "eval-tasks/ch02-vecadd-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch02-vecmul-single-turn", "question": "Task: ch02-vecmul-single-turn\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n#include <cuda_runtime.h>\n\n__global__ void vecMulKernel(const float* A, const float* B, float* C, int n) {\n // TODO: Each thread i computes: C[i] = A[i] * B[i] (if i < n)\n // Hints:\n // - Derive global index i from block and thread indices\n // - Guard against i >= n\n}\n```", "task_dir": "eval-tasks/ch02-vecmul-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch03-ex1a-matmul-row-per-thread", "question": "Task: ch03-ex1a-matmul-row-per-thread\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n#include <cuda_runtime.h>\n\nextern \"C\" __global__\nvoid matrixMulRowKernel(const float* __restrict__ M,\n const float* __restrict__ N,\n float* __restrict__ P,\n int size) {\n // TODO:\n // - Each thread computes ONE output row 'row'\n // - Guard: if (row < size)\n // - For each column 'col', compute dot(M[row, :], N[:, col])\n // - Write P[row * size + col]\n // Hints:\n // int row = blockIdx.x * blockDim.x + threadIdx.x;\n // for (int col = 0; col < size; ++col) { ... }\n}\n```", "task_dir": "eval-tasks/ch03-ex1a-matmul-row-per-thread", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch03-ex1b-matmul-col-per-thread", "question": "Task: ch03-ex1b-matmul-col-per-thread\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n#include <cuda_runtime.h>\n\nextern \"C\" __global__\nvoid matrixMulColKernel(const float* __restrict__ M,\n const float* __restrict__ N,\n float* __restrict__ P,\n int size) {\n // TODO:\n // - Each thread computes ONE output column 'col'\n // - Guard: if (col < size) \n // - For each row 'row', compute dot(M[row, :], N[:, col])\n // - Write P[row * size + col]\n // Hints:\n // int col = blockIdx.x * blockDim.x + threadIdx.x;\n // for (int row = 0; row < size; ++row) { ... }\n}\n```", "task_dir": "eval-tasks/ch03-ex1b-matmul-col-per-thread", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch03-rgb2gray-single-turn", "question": "Task: ch03-rgb2gray-single-turn\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n#include <cuda_runtime.h>\n#include <math.h>\n\n__device__ __forceinline__ unsigned char clamp_u8(int v) {\n return (unsigned char)(v < 0 ? 0 : (v > 255 ? 255 : v));\n}\n\n__global__ void rgb2grayKernel(const unsigned char* R,\n const unsigned char* G,\n const unsigned char* B,\n unsigned char* gray,\n int n) {\n // TODO:\n // - Compute global index i\n // - If (i < n), compute:\n // float y = 0.299f*R[i] + 0.587f*G[i] + 0.114f*B[i];\n // int yi = (int)floorf(y + 0.5f); // round to nearest\n // gray[i] = clamp_u8(yi);\n}\n```", "task_dir": "eval-tasks/ch03-rgb2gray-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch04-device-props-eval", "question": "Task: ch04-device-props-eval\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n#include \"student_kernel.cuh\"\n#include <cuda_runtime.h>\n#include <string.h>\n\nint collect_device_info(DeviceInfo* out, int max_out, int* out_count) {\n // TODO: Implement using CUDA Runtime API\n // Required calls:\n // - cudaGetDeviceCount(&count)\n // - For each device id in [0, count): cudaGetDeviceProperties(&prop, id)\n //\n // Required fields to fill in for each DeviceInfo (from cudaDeviceProp prop):\n // name -> prop.name (ensure null-terminated)\n // major -> prop.major\n // minor -> prop.minor\n // totalGlobalMem -> prop.totalGlobalMem\n // multiProcessorCount -> prop.multiProcessorCount\n // totalConstMem -> prop.totalConstMem\n // sharedMemPerBlock -> prop.sharedMemPerBlock\n // regsPerBlock -> prop.regsPerBlock\n // warpSize -> prop.warpSize\n // maxThreadsPerBlock -> prop.maxThreadsPerBlock\n // maxThreadsDim{0,1,2} -> prop.maxThreadsDim[0..2]\n // maxGridSize{0,1,2} -> prop.maxGridSize[0..2]\n // clockRate -> prop.clockRate\n // memoryClockRate -> prop.memoryClockRate\n // memoryBusWidth -> prop.memoryBusWidth\n // l2CacheSize -> prop.l2CacheSize\n //\n // Return 0 on success, non-zero on failure.\n\n (void)out; (void)max_out; (void)out_count; // remove after implementing\n return 1; // placeholder: non-zero means \"not implemented\"\n}\n```", "task_dir": "eval-tasks/ch04-device-props-eval", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch05-matmul-tiled", "question": "Task: ch05-matmul-tiled\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n#include <cuda_runtime.h>\n#include <vector>\n#include <cstring>\n#include <cstdio>\n\n#ifndef TILE\n#define TILE 16\n#endif\n\n#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }\ninline void gpuAssert(cudaError_t code, const char* file, int line, bool abort=true){\n if(code != cudaSuccess){\n fprintf(stderr,\"GPUassert: %s %s %d\\n\", cudaGetErrorString(code), file, line);\n if(abort) exit(code);\n }\n}\n\n// TODO: Implement shared-memory tiled matrix multiplication kernel\n//\n// Requirements:\n// - Implement GEMM: P = M * N where M is mn, N is no, P is mo\n// - Use shared memory tiling to reduce global memory accesses\n// - Declare two shared memory tiles (size TILETILE) for M and N submatrices\n// - Process matrix multiplication in phases, loading one tile at a time\n// - Use __syncthreads() to ensure all threads have loaded data before computing\n// - Handle edge cases with bounds checking (matrices may not align to TILE boundaries)\n// - Do NOT modify inputs M or N (they are const)\n//\n// Algorithm outline:\n// 1. Calculate output row and column for this thread\n// 2. Loop over tiles along the shared dimension (k-dimension):\n// a. Load one TILETILE submatrix from M into shared memory\n// b. Load one TILETILE submatrix from N into shared memory\n// c. Use bounds checks: load 0.0f if out of matrix bounds\n// d. Synchronize threads (__syncthreads())\n// e. Compute partial dot product using shared memory tiles\n// f. Synchronize threads again before next tile\n// 3. Write accumulated result to global memory P\n//\n// Hints:\n// - TILE is defined as a compile-time constant (default 16)\n// - Use threadIdx for within-block indexing, blockIdx for block position\n// - Shared memory declarations: __shared__ float tile_M[TILE][TILE];\n// - Number of phases: (n + TILE - 1) / TILE\n// - Matrix indexing: M[row * n + k], N[k * o + col], P[row * o + col]\n\n__global__ void TiledMatMulKernel(const float* __restrict__ M,\n const float* __restrict__ N,\n float* __restrict__ P,\n int m, int n, int o)\n{\n // TODO: Implement tiled matrix multiplication\n}\n\n// TODO: Implement kernel launcher\n//\n// Requirements:\n// - Allocate device memory for M, N, and P matrices\n// - Copy input matrices M and N from host to device\n// - Configure grid and block dimensions for tiled execution\n// - Launch TiledMatMulKernel with appropriate parameters\n// - Copy result matrix P from device back to host\n// - Free all device allocations\n//\n// Hints:\n// - Handle edge case: if m==0 || n==0 || o==0, return early\n// - Memory size: M needs m*n*sizeof(float), N needs n*o*sizeof(float), P needs m*o*sizeof(float)\n// - Use gpuErrchk() macro to wrap all CUDA API calls for error checking\n// - Block dimensions: dim3 block(TILE, TILE) for 2D thread blocks\n// - Grid dimensions: ensure enough blocks to cover entire output matrix\n// - Grid size formula: (dimension + TILE - 1) / TILE\n// - Remember to synchronize after kernel launch to catch execution errors\n\nextern \"C\"\nvoid launch_tiled_matmul(const float* M_h, const float* N_h, float* P_h,\n int m, int n, int o)\n{\n // TODO: Implement launcher\n (void)M_h; (void)N_h; (void)P_h; (void)m; (void)n; (void)o;\n}```", "task_dir": "eval-tasks/ch05-matmul-tiled", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], 
"timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch05-matmul-tiled-multiturn", "question": "Task: ch05-matmul-tiled-multiturn\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// student_kernel.cu\n// Implement a shared-memory tiled matrix multiply kernel:\n// C[M x K] = A[M x N] * B[N x K]\n// TILE size is 16x16. Handle non-multiple sizes and out-of-bounds safely.\n\n#include <cuda_runtime.h>\n\nextern \"C\" void launch_student(const float* A, const float* B, float* C,\n int M, int N, int K, int blockSize);\n\n// TODO: Implement this kernel\n__global__ void matmul_tiled_student(const float* __restrict__ A,\n const float* __restrict__ B,\n float* __restrict__ C,\n int M, int N, int K)\n{\n // TODO: Implement shared-memory tiled matrix multiplication\n // REQUIRED: TILE = 16\n // \n // Steps to implement:\n // 1. Define TILE size (16)\n // 2. Calculate 2D thread coordinates (row, col) in output matrix C\n // 3. Declare shared memory tiles for A and B submatrices \n // 4. Initialize accumulator\n // 5. Loop over tiles along the inner dimension N:\n // a. Cooperatively load A tile and B tile into shared memory\n // b. Guard against out-of-bounds accesses (pad with zeros)\n // c. Synchronize threads (__syncthreads())\n // d. Compute partial products using shared memory tiles\n // e. Synchronize threads again\n // 6. Write final result to global memory (with bounds checking)\n}\n\nextern \"C\" void launch_student(const float* A, const float* B, float* C,\n int M, int N, int K, int /*blockSize*/)\n{\n // TODO: Set up proper grid and block dimensions\n // Hint: Use 16x16 thread blocks, calculate grid size based on output dimensions\n \n dim3 block(16, 16);\n dim3 grid((K + 15) / 16, (M + 15) / 16);\n matmul_tiled_student<<<grid, block>>>(A, B, C, M, N, K);\n}\n```", "task_dir": "eval-tasks/ch05-matmul-tiled-multiturn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch05-matmul-tiled-speed", "question": "Task: ch05-matmul-tiled-speed\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n#include <cstdio>\n\n#ifndef TILE\n#define TILE 16\n#endif\n\n// Students implement this kernel: C[M x K] = A[M x N] * B[N x K]\n// One thread computes one C element; shared-memory tiled load of A and B.\n__global__ void matmul_tiled_student_kernel(const float* __restrict__ A,\n const float* __restrict__ B,\n float* __restrict__ C,\n int M, int N, int K) {\n // TODO:\n // - Compute (row, col) from blockIdx/threadIdx\n // - Loop over tiles of N dimension\n // - Use shared memory tiles for A (TILE x TILE) and B (TILE x TILE)\n // - Guard for out-of-bounds loads/stores\n // - Accumulate sum into a register and store to C[row*K + col]\n\n // Hints (remove after implementing):\n // extern __shared__ float smem[]; // or static shared tiles\n // __shared__ float As[TILE][TILE], Bs[TILE][TILE];\n // int row = blockIdx.y * blockDim.y + threadIdx.y;\n // int col = blockIdx.x * blockDim.x + threadIdx.x;\n\n // --- your code here ---\n\n // Placeholder stub (compiles but gives wrong results):\n int row = blockIdx.y * blockDim.y + threadIdx.y;\n int col = blockIdx.x * blockDim.x + threadIdx.x;\n\n if (row < M && col < K) {\n C[row * K + col] = 0.0f; // TODO: replace with proper tiled computation\n }\n}\n\n// Host wrapper called by test harness\nextern \"C\" void matmul_student(const float* dA, const float* dB, float* dC,\n int M, int N, int K, int tile) {\n dim3 block(TILE, TILE);\n dim3 grid((K + TILE - 1)/TILE, (M + TILE - 1)/TILE);\n // You may ignore `tile` and use TILE macro; the harness passes TILE=16.\n matmul_tiled_student_kernel<<<grid, block>>>(dA, dB, dC, M, N, K);\n}\n```", "task_dir": "eval-tasks/ch05-matmul-tiled-speed", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch06-thread-coarsening-matmul", "question": "Task: ch06-thread-coarsening-matmul\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n#include <cuda_runtime.h>\n#include <cstdio>\n\n#ifndef TILE_WIDTH\n#define TILE_WIDTH 16\n#endif\n\n#ifndef COARSE_FACTOR\n#define COARSE_FACTOR 4\n#endif\n\n// Students: implement a coarsened, tiled GEMM C[MK] = A[MN] * B[NK]\n// Each block computes a tile: height TILE_WIDTH, width TILE_WIDTH*COARSE_FACTOR\n// Use shared memory tiles for A and B; do safe (bounds-checked) loads.\n// Row-major layout: elem(i,j) at base[i*ld + j].\n\n__global__ void MatmulCoarsenedKernel(const float* __restrict__ A,\n const float* __restrict__ B,\n float* __restrict__ C,\n int M, int N, int K)\n{\n // TODO: Implement thread coarsening matrix multiplication\n //\n // Key requirements:\n // 1. Use shared memory tiles for A and B:\n // __shared__ float As[TILE_WIDTH][TILE_WIDTH];\n // __shared__ float Bs[TILE_WIDTH][TILE_WIDTH * COARSE_FACTOR];\n //\n // 2. Each thread computes COARSE_FACTOR output elements\n // - Thread (tx,ty) computes elements at columns: colBase + c*TILE_WIDTH for c=0..COARSE_FACTOR-1\n // - Use register array: float acc[COARSE_FACTOR];\n //\n // 3. Loop over tiles of the N dimension:\n // - Load A tile (TILE_WIDTH x TILE_WIDTH)\n // - Load B super-tile (TILE_WIDTH x TILE_WIDTH*COARSE_FACTOR) in COARSE_FACTOR stripes\n // - __syncthreads() after loading\n // - Compute partial products with triple nested loop (k, c)\n // - __syncthreads() before next iteration\n //\n // 4. Write results with bounds checking\n //\n // Template structure:\n // const int ty = threadIdx.y;\n // const int tx = threadIdx.x;\n // const int row = blockIdx.y * TILE_WIDTH + ty;\n // const int colBase = blockIdx.x * (TILE_WIDTH * COARSE_FACTOR) + tx;\n //\n // float acc[COARSE_FACTOR];\n // for (int c = 0; c < COARSE_FACTOR; ++c) acc[c] = 0.0f;\n //\n // Loop over tiles...\n //\n // Write results...\n\n // Placeholder implementation (incorrect, will fail tests):\n int row = blockIdx.y * blockDim.y + threadIdx.y;\n int col = blockIdx.x * blockDim.x + threadIdx.x;\n\n if (row < M && col < K) {\n C[row * K + col] = 0.0f; // TODO: Replace with actual coarsened computation\n }\n}\n\n// Student launcher: choose grid/block and launch your kernel\nextern \"C\" void launch_student(const float* A_d,\n const float* B_d,\n float* C_d,\n int M, int N, int K)\n{\n // TODO: Configure proper grid and block dimensions for thread coarsening\n //\n // Key points:\n // - Block size should be (TILE_WIDTH, TILE_WIDTH)\n // - Grid X dimension should account for COARSE_FACTOR: (K + TILE_WIDTH*COARSE_FACTOR - 1) / (TILE_WIDTH*COARSE_FACTOR)\n // - Grid Y dimension covers rows: (M + TILE_WIDTH - 1) / TILE_WIDTH\n\n // Placeholder launch (incorrect dimensions):\n dim3 block(TILE_WIDTH, TILE_WIDTH);\n dim3 grid((K + TILE_WIDTH - 1) / TILE_WIDTH, (M + TILE_WIDTH - 1) / TILE_WIDTH);\n\n MatmulCoarsenedKernel<<<grid, block>>>(A_d, B_d, C_d, M, N, K);\n}\n```", "task_dir": "eval-tasks/ch06-thread-coarsening-matmul", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch07-conv2d-tiled-constant", "question": "Task: ch07-conv2d-tiled-constant\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// students edit only this file\n#include <cuda_runtime.h>\n#include <cstdio>\n\n#ifndef TILE\n#define TILE 16\n#endif\n\n#ifndef MAX_RADIUS\n#define MAX_RADIUS 8 // supports up to (2*8+1)=17x17 filters\n#endif\n\n// 1D constant buffer for filter coefficients (row-major)\n// Size = (2*MAX_RADIUS+1)^2\n__constant__ float c_filter[(2*MAX_RADIUS+1)*(2*MAX_RADIUS+1)];\n\nextern \"C\" __host__ void setFilterConstant(const float* h_filter, int r) {\n const int K = 2*r + 1;\n cudaMemcpyToSymbol(c_filter, h_filter, K*K*sizeof(float), 0, cudaMemcpyHostToDevice);\n}\n\n// Students must implement this kernel.\n// Requirements:\n// - Shared-memory tiling with halo of +/-r (use zero padding for out-of-bounds loads)\n// - Use c_filter (in constant memory) for filter coefficients\n// - Each thread computes one output pixel (if in bounds)\n// - Grid/block: 2D, blockDim=(TILE,TILE), gridDim=ceil(W/TILE) x ceil(H/TILE)\n// - Inputs/outputs are float* (grayscale), shapes: in/out = H*W\n// - r is runtime radius (<= MAX_RADIUS)\n__global__ void conv2d_tiled_constant_kernel(const float* __restrict__ in,\n float* __restrict__ out,\n int H, int W, int r)\n{\n // TODO: Implement tiled 2D convolution with constant memory\n //\n // Key steps:\n // 1) Compute global (x,y) coordinates for this thread\n // 2) Declare shared memory tile with halo: extern __shared__ float smem[];\n // Size needed: (TILE+2*r) * (TILE+2*r) * sizeof(float)\n // 3) Compute the tile's coverage region including halo\n // 4) Cooperatively load the entire tile+halo region into shared memory\n // - Use zero padding for out-of-bounds pixels\n // - May need nested loops for threads to cover entire shared memory region\n // 5) __syncthreads() to ensure all data is loaded\n // 6) If this thread's output pixel is in bounds, compute convolution:\n // - Access input pixels from shared memory (with proper offsets)\n // - Access filter coefficients from c_filter constant memory\n // - Accumulate weighted sum\n // 7) Write result to global output memory\n //\n // Hints:\n // - Filter coefficients in c_filter are stored row-major: c_filter[(dy+r)*(2*r+1) + (dx+r)]\n // - Shared memory indexing: smem[sy * tileWidth + sx] where tileWidth = TILE+2*r\n // - Global input indexing: in[gy * W + gx]\n // - Consider boundary conditions carefully for both image edges and tile edges\n\n // Placeholder implementation (will fail until properly implemented):\n int x = blockIdx.x * blockDim.x + threadIdx.x;\n int y = blockIdx.y * blockDim.y + threadIdx.y;\n\n if (x < W && y < H) {\n // This is incorrect - just copies input to output\n out[y * W + x] = in[y * W + x];\n }\n}\n```", "task_dir": "eval-tasks/ch07-conv2d-tiled-constant", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch08-heat-3d-single-turn", "question": "Task: ch08-heat-3d-single-turn\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n#include <cstdio>\n\n// Implement a single explicit 7-point heat step.\n// in : N*N*N input (flattened, row-major: i*N*N + j*N + k)\n// out : N*N*N output (same layout)\n// N : grid dimension\n// alpha, dt, dx: physical parameters; r = alpha*dt/(dx*dx)\n// Boundary policy: copy boundary through (out = in) if any neighbor would be OOB.\n__global__ void heat_step_kernel(const float* __restrict__ in,\n float* __restrict__ out,\n unsigned int N,\n float alpha, float dt, float dx)\n{\n // TODO:\n // 1) Compute (i,j,k) from block and thread indices\n // 2) If any of i,j,k is 0 or N-1 => boundary: out = in and return\n // 3) Else compute r = alpha*dt/(dx*dx) and 7-point update:\n // out = in + r*(sum six neighbors - 6*in)\n // Guard small N (N<3) by simply copying (no interior exists).\n}\n```", "task_dir": "eval-tasks/ch08-heat-3d-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch08-stencil3d-basic-single-turn", "question": "Task: ch08-stencil3d-basic-single-turn\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n#include <cstdio>\n\n__global__ void stencil3d_basic_student(\n const float* __restrict__ in,\n float* __restrict__ out,\n int N,\n float c0, float c1, float c2, float c3, float c4, float c5, float c6)\n{\n // TODO:\n // - Each thread computes OUT(i,j,k) for one grid point\n // - Use 7-point stencil on INTERIOR points: (1..N-2) in each dim\n // - For boundary points (i==0 || i==N-1 || ...), copy through: out = in\n // - Guard for N==0 or N==1 safely\n // Hints:\n // int i = blockIdx.z * blockDim.z + threadIdx.z;\n // int j = blockIdx.y * blockDim.y + threadIdx.y;\n // int k = blockIdx.x * blockDim.x + threadIdx.x;\n // int idx = (i * N + j) * N + k;\n // int L = ((i) * N + j) * N + (k-1); // k-1, etc.\n}\n```", "task_dir": "eval-tasks/ch08-stencil3d-basic-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch08-stencil3d-sharedmem-single-turn", "question": "Task: ch08-stencil3d-sharedmem-single-turn\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n#include <cstdio>\n\n// Tile parameters for this task\n#ifndef IN_TILE_DIM\n#define IN_TILE_DIM 8 // threads per dim that load (with halo)\n#endif\n#define OUT_TILE_DIM (IN_TILE_DIM-2)\n\n__global__ void stencil3d_shared_student(\n const float* __restrict__ in,\n float* __restrict__ out,\n int N,\n float c0, float c1, float c2, float c3, float c4, float c5, float c6)\n{\n // TODO:\n // - Launch with block=(IN_TILE_DIM,IN_TILE_DIM,IN_TILE_DIM)\n // - Each block loads a IN_TILE_DIM^3 tile (with halo) into shared memory\n // - Only threads with local coords in [1..IN_TILE_DIM-2] compute outputs\n // for the corresponding global interior coordinates\n // - Copy-through boundary cells (same rule as basic)\n // Hints:\n // Shared array: __shared__ float tile[IN_TILE_DIM][IN_TILE_DIM][IN_TILE_DIM];\n // Global coords start at (blockIdx * OUT_TILE_DIM) - 1 (to include halo)\n // Guard global loads (row/col/depth) that fall outside [0..N-1]\n}\n```", "task_dir": "eval-tasks/ch08-stencil3d-sharedmem-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch09-histogram-naive-single-turn", "question": "Task: ch09-histogram-naive-single-turn\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n#include <cstddef>\n\n// TODO: Implement naive global-atomic histogram.\n//\n// Requirements:\n// - Use global-memory atomicAdd(&hist[bin], 1u)\n// - Grid-stride loop over N\n// - Ignore out-of-range bin indices\n// - Do not write to 'in'\n// - No shared memory\n//\n// Signature must not change.\n__global__ void histogram_kernel(const int* in, unsigned int* hist,\n size_t N, int num_bins)\n{\n // TODO:\n // size_t i = ...\n // size_t stride = ...\n // for (; i < N; i += stride) {\n // int bin = in[i];\n // if (0 <= bin && bin < num_bins) atomicAdd(&hist[bin], 1u);\n // }\n}\n```", "task_dir": "eval-tasks/ch09-histogram-naive-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch09-histogram-shared-single-turn", "question": "Task: ch09-histogram-shared-single-turn\n Target file: student_kernel.cu\n\n Implementation requirements:\n- Use extern __shared__ unsigned int s_hist[] with num_bins elements.\n- Cooperatively zero the shared histogram before processing data.\n- Iterate over the input with a grid-stride loop and accumulate counts into shared memory (no atomics within the block).\n- Synchronize threads before and after shared-memory accumulation.\n- Cooperatively merge shared counts to global hist with atomicAdd, handling num_bins > blockDim.x.\n- Do not modify the input array and avoid out-of-bounds accesses.\n\n Here is the current student_kernel.cu:\n\n ```cuda\n // student_kernel.cu\n#include <cuda_runtime.h>\n#include <cstddef>\n\n// TODO: Implement shared-memory privatized histogram\n//\n// Requirements:\n// - Use shared memory to reduce global atomic contention\n// - Each block maintains its own private histogram in shared memory\n// - Accumulate counts into shared histogram using atomicAdd\n// - Flush shared histogram to global histogram at the end\n// - Process all N input elements across the grid\n//\n// Algorithm steps:\n// 1. Initialize shared memory histogram to zero (cooperative initialization)\n// - Each thread zeros multiple bins if num_bins > blockDim.x\n// - Use loop: for (int bin = threadIdx.x; bin < num_bins; bin += blockDim.x)\n// - Synchronize after initialization\n//\n// 2. Accumulate input values into shared histogram\n// - Use grid-stride loop to process input elements\n// - Read input value, treat it as bin index\n// - Validate bin is in range [0, num_bins)\n// - Use atomicAdd to increment shared histogram bin\n// - Synchronize after accumulation\n//\n// 3. Merge shared histogram into global histogram\n// - Each thread handles multiple bins if needed\n// - Use atomicAdd to add shared counts to global histogram\n//\n// Hints:\n// - Shared memory is declared: extern __shared__ unsigned int s_hist[];\n// - Grid-stride loop: for (size_t i = blockIdx.x*blockDim.x + threadIdx.x; i < N; i += blockDim.x*gridDim.x)\n// - Atomic operations: atomicAdd(&s_hist[bin], 1u) for shared, atomicAdd(&hist[bin], count) for global\n// - Synchronization required between phases: __syncthreads()\n\n__global__ void histogram_kernel(const int* in, unsigned int* hist,\n size_t N, int num_bins)\n{\n extern __shared__ unsigned int s_hist[];\n\n // TODO: Implement 3-phase histogram: init shared accumulate flush to global\n (void)in; (void)hist; (void)N; (void)num_bins; (void)s_hist;\n}\n ```", "task_dir": "eval-tasks/ch09-histogram-shared-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch10-reduction-max-arbitrary", "question": "Task: ch10-reduction-max-arbitrary\nTarget file: student_kernel.cu\n\nGoal: Implement an arbitrary-length maximum reduction that scans the input with grid-stride loops, performs a shared-memory tree reduction per block, and emits the final maximum via a race-free CAS-based atomic max on floats.\n\nKey requirements:\n- Kernel signature: `__global__ void reduce_max_arbitrary(const float* in, float* out, int n)`\n- Initialize each thread's local maximum to `-INFINITY` when it sees no elements.\n- Use grid-stride loops to cover the full array and reduce into shared memory before the atomic.\n- Implement `atomicMaxFloat` using `atomicCAS` on the integer bit pattern and update only `out[0]`.\n\nSkeleton:\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n#include <limits>\n\n// TODO: Implement arbitrary-length maximum reduction with grid-stride, shared\n// memory, and a CAS-loop atomicMax for float. Initialize per-thread local max\n// to -INFINITY when n==0 or no elements in its stride.\n//\n// IMPORTANT: Use the -INFINITY macro (not std::numeric_limits<float>::infinity())\n// Example: float local_max = -INFINITY;\n__device__ inline\nvoid atomicMaxFloat(float* addr, float val) {\n // TODO: Implement via atomicCAS on int bit patterns\n}\n\nextern \"C\" __global__\nvoid reduce_max_arbitrary(const float* in, float* out, int n) {\n // TODO\n}\n```", "task_dir": "eval-tasks/ch10-reduction-max-arbitrary", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch10-reduction-sum-2048", "question": "Task: ch10-reduction-sum-2048\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n\n// TODO: Implement convergent shared-memory reduction for exactly 2048 elements.\n// Contract:\n// - gridDim.x == 1, blockDim.x == 1024\n// - Each thread loads two elements: in[tid] and in[tid + 1024]\n// - Reduce in shared memory (convergent pattern), write out[0] only.\nextern \"C\" __global__\nvoid reduce_sum_2048(const float* in, float* out) {\n // TODO\n // Suggested shape:\n // __shared__ float s[1024];\n // unsigned t = threadIdx.x;\n // float v = in[t] + in[t + 1024];\n // s[t] = v;\n // __syncthreads();\n // for (unsigned stride = blockDim.x/2; stride >= 1; stride >>= 1) { ... }\n // if (t == 0) out[0] = s[0];\n}\n```", "task_dir": "eval-tasks/ch10-reduction-sum-2048", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch10-reduction-sum-arbitrary", "question": "Task: ch10-reduction-sum-arbitrary\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n\n// TODO: Implement arbitrary-length sum with grid-stride loads, shared-memory\n// reduction per block, and atomicAdd(out, block_sum). Use dynamic shared memory.\nextern \"C\" __global__\nvoid reduce_sum_arbitrary(const float* in, float* out, int n) {\n // TODO\n // Suggested shape:\n // extern __shared__ float s[];\n // int tid = threadIdx.x;\n // long long idx = blockIdx.x * (long long)blockDim.x * 2 + tid;\n // long long stride = (long long)gridDim.x * blockDim.x * 2;\n // float sum = 0.f;\n // for (; idx < n; idx += stride) {\n // sum += in[idx];\n // long long idx2 = idx + blockDim.x;\n // if (idx2 < n) sum += in[idx2];\n // }\n // s[tid] = sum; __syncthreads();\n // for (int step = blockDim.x/2; step >= 1; step >>= 1) { ... }\n // if (tid==0) atomicAdd(out, s[0]);\n}\n```", "task_dir": "eval-tasks/ch10-reduction-sum-arbitrary", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch12-merge-path-single-turn", "question": "Task: ch12-merge-path-single-turn\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n\n// TODO: Implement parallel merge using merge-path (diagonal partition).\n// Contract summary:\n// - Stable: on ties, choose A first\n// - Partition per thread using diagonals; then sequentially merge that slice\n// - Inputs A,B are sorted ascending; write C of length nA+nB\n\n__device__ inline int clampi(int x, int lo, int hi) {\n return x < lo ? lo : (x > hi ? hi : x);\n}\n\n// Find (i,j) on diagonal d (i+j = d) satisfying merge-path conditions.\n// Returns i; j = d - i.\n// Invariants:\n// lo = max(0, d - nB), hi = min(d, nA)\n// Stable tie-breaking: A[i-1] <= B[j] (and B[j-1] < A[i])\n__device__ __forceinline__\nint merge_path_search(const int* __restrict__ A, int nA,\n const int* __restrict__ B, int nB,\n int d)\n{\n // TODO: Implement binary search to find merge-path coordinates\n // Return i such that (i, d-i) satisfies merge-path conditions\n return 0; // placeholder\n}\n\nextern \"C\" __global__\nvoid merge_path_kernel(const int* __restrict__ A, int nA,\n const int* __restrict__ B, int nB,\n int* __restrict__ C)\n{\n // TODO: Implement merge-path parallel merge\n // 1. Calculate thread's diagonal range [d0, d1)\n // 2. Find merge coordinates (i0,j0) and (i1,j1) using merge_path_search\n // 3. Sequentially merge A[i0..i1) and B[j0..j1) into C[d0..d1)\n}\n```", "task_dir": "eval-tasks/ch12-merge-path-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch13-merge-path-fullsort-single", "question": "Task: ch13-merge-path-fullsort-single\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// ch13-merge-path-fullsort-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <stdint.h>\n\n// CONTRACT:\n// Implement stable GPU merge sort using iterative merge passes.\n// You must implement:\n// - merge_path_search (device): diagonal search\n// - merge_path_kernel (global): merges a slice [d0,d1)\n// - gpu_merge_sort (host): doubles width and ping-pongs buffers until sorted\n\n__device__ int merge_path_search(const uint32_t* A, int nA,\n const uint32_t* B, int nB,\n int d)\n{\n // TODO: diagonal binary search; return i (then j=d-i)\n return 0;\n}\n\n__global__ void merge_path_kernel(const uint32_t* __restrict__ A, int nA,\n const uint32_t* __restrict__ B, int nB,\n uint32_t* __restrict__ C)\n{\n // TODO:\n // - P = total threads\n // - segment size seg = ceil((nA+nB)/P)\n // - each thread t merges its slice [d0,d1)\n // - compute (i0,j0) & (i1,j1) via merge_path_search\n // - sequentially merge into C[d0..d1)\n}\n\nextern \"C\" void gpu_merge_sort(const uint32_t* d_in, uint32_t* d_out, int n)\n{\n // TODO:\n // - width = 1; ping-pong buffers\n // - for width < n:\n // * launch merges of adjacent runs [k..k+width) and [k+width..k+2*width)\n // - final result copied to d_out\n (void)d_in; (void)d_out; (void)n;\n}\n```", "task_dir": "eval-tasks/ch13-merge-path-fullsort-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch13-radix-multiradix-coarsened-fullsort-single", "question": "Task: ch13-radix-multiradix-coarsened-fullsort-single\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n#include <stdint.h>\n#include <cstdio>\n\nstatic inline void CK(cudaError_t e,const char* m){\n if(e!=cudaSuccess){ std::fprintf(stderr,\"CUDA %s: %s\\n\", m, cudaGetErrorString(e)); std::exit(2); }\n}\n\n#ifndef RADIX_BITS\n#define RADIX_BITS 4\n#endif\n#ifndef COARSENING_FACTOR\n#define COARSENING_FACTOR 8\n#endif\n#ifndef BLOCK\n#define BLOCK 256\n#endif\n\n// TODO: Implement a stable 4-bit LSD radix sort with thread coarsening (COARSENING_FACTOR).\n// Sort must be in-place on `data` (you may use a temp buffer internally).\n\n// Choose your radix size and coarsening factor\n#define RADIX_SIZE (1 << RADIX_BITS) // 2^RADIX_BITS buckets\n#define RADIX_MASK (RADIX_SIZE - 1)\n\nextern \"C\" __global__\nvoid radix_sort_coarsened_kernel(unsigned int* __restrict__ data,\n unsigned int* __restrict__ temp,\n int n,\n int shift)\n{\n // TODO: Implement coarsened multi-radix sort pass\n // 1. Each thread loads COARSENING_FACTOR elements\n // 2. Count elements for each bucket using coarsened loading\n // 3. Compute prefix sums to find output positions for each bucket\n // 4. Scatter elements to correct positions based on radix value\n // 5. Ensure stable sorting and efficient memory access patterns\n}\n\nextern \"C\"\nvoid radix_sort_coarsened_host(unsigned int* data, int n)\n{\n if(n <= 1) return;\n // Placeholder: shallow copy (intentionally insufficient so tests fail until implemented)\n unsigned int* tmp=nullptr;\n CK(cudaMalloc(&tmp, n*sizeof(unsigned int)), \"malloc tmp\");\n CK(cudaMemcpy(tmp, data, n*sizeof(unsigned int), cudaMemcpyDeviceToDevice), \"copy to tmp\");\n CK(cudaMemcpy(data, tmp, n*sizeof(unsigned int), cudaMemcpyDeviceToDevice), \"copy back\");\n cudaFree(tmp);\n}\n```", "task_dir": "eval-tasks/ch13-radix-multiradix-coarsened-fullsort-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch13-radix-multiradix-fullsort-single", "question": "Task: ch13-radix-multiradix-fullsort-single\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n#include <stdint.h>\n#include <cstdio>\n\nstatic inline void CK(cudaError_t e, const char* m){\n if(e != cudaSuccess){\n std::fprintf(stderr, \"CUDA %s: %s\\n\", m, cudaGetErrorString(e));\n std::exit(2);\n }\n}\n\n// TODO: Implement a multiradix (RADIX_BITS=4) stable radix sort in-place.\n// Contract:\n// extern \"C\" void radix_sort_multiradix_host(unsigned int* data, int n);\n// Requirements:\n// - Sort ascending, stable per pass (4-bit buckets, 8 passes total).\n// - Arbitrary n (including 0 / non-multiples of block size).\n// - No OOB writes (tests use guarded buffers).\n// - In-place on `data` (you may use an internal device temp buffer).\n\n// Choose your radix size (recommended: 2-bit or 4-bit)\n#define RADIX_BITS 2\n#define RADIX_SIZE (1 << RADIX_BITS) // 2^RADIX_BITS buckets\n#define RADIX_MASK (RADIX_SIZE - 1)\n\nextern \"C\" __global__\nvoid radix_sort_multiradix_kernel(unsigned int* __restrict__ data,\n unsigned int* __restrict__ temp,\n int n,\n int shift)\n{\n // TODO: Implement multi-radix sort pass\n // 1. Count elements for each bucket (RADIX_SIZE buckets) using shared memory\n // 2. Compute prefix sums to find output positions for each bucket\n // 3. Scatter elements to correct positions based on radix value\n // 4. Ensure stable sorting (preserve relative order for equal keys)\n}\n\nextern \"C\"\nvoid radix_sort_multiradix_host(unsigned int* data, int n)\n{\n if (n <= 1) return;\n // Starter behavior: shallow copy will fail non-trivial tests.\n unsigned int* tmp = nullptr;\n CK(cudaMalloc(&tmp, n*sizeof(unsigned int)), \"malloc tmp\");\n CK(cudaMemcpy(tmp, data, n*sizeof(unsigned int), cudaMemcpyDeviceToDevice), \"copy tmp\");\n CK(cudaMemcpy(data, tmp, n*sizeof(unsigned int), cudaMemcpyDeviceToDevice), \"copy back\");\n cudaFree(tmp);\n}\n```", "task_dir": "eval-tasks/ch13-radix-multiradix-fullsort-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch13-radix-naive-1bit-fullsort-single", "question": "Task: ch13-radix-naive-1bit-fullsort-single\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// student_kernel.cu\n// TODO: Implement a naive 1-bit parallel radix sort that fully sorts 32-bit keys\n// across 32 passes (LSB -> MSB), stable per pass.\n//\n// Requirements:\n// - API: extern \"C\" void radix_sort_1bit_host(unsigned int* data, int n)\n// - In-place: modify data in-place\n// - Stability: within each bit-partition, preserve relative order (use zerosBefore(i) / onesBefore(i))\n// - Multi-pass orchestration: 32 passes, swap ping-pong buffers every pass\n// - Correct for arbitrary n (n can be 0)\n// - No OOB writes: only write within [0..n)\n//\n// Hints (not mandatory, but aligned with the reference):\n// - kFlagZeros: flagsZero[i] = 1 if ((x >> bit)&1)==0 else 0\n// - kBlockExclusiveScan + host scan over block sums for robustness\n// - kAddBlockOffsets to turn per-block exclusive scans into global exclusive scan\n// - kScatter uses stable positions:\n// if bit==0: pos = zerosBefore(i)\n// else : pos = totalZeros + (i - zerosBefore(i))\n\n#include <cuda_runtime.h>\n\n// TODO: Implement naive 1-bit radix sort using LSD approach.\n// Contract summary:\n// - Stable: equal elements maintain relative order\n// - Process 1 bit per pass, 32 passes total for uint32_t\n// - Use parallel counting, prefix sum, and scattering\n// - Sort in ascending order\n\nextern \"C\" __global__\nvoid radix_sort_1bit_kernel(unsigned int* __restrict__ data,\n unsigned int* __restrict__ temp,\n int n,\n int bit)\n{\n // TODO: Implement single-bit radix sort pass\n // 1. Count elements with bit=0 and bit=1 using shared memory\n // 2. Compute prefix sums to find output positions\n // 3. Scatter elements to correct positions based on bit value\n // 4. Ensure stable sorting (preserve relative order for equal keys)\n}\n\nextern \"C\"\nvoid radix_sort_1bit_host(unsigned int* data, int n)\n{\n // TODO: Implement host function that orchestrates 32 sorting passes\n // 1. Allocate temporary buffer\n // 2. For each bit position (0 to 31):\n // - Launch radix_sort_1bit_kernel\n // - Swap data and temp pointers\n // 3. Ensure final result is in original data array\n // 4. Clean up temporary buffer\n}\n```", "task_dir": "eval-tasks/ch13-radix-naive-1bit-fullsort-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch13-radix-onepass-multiradix-single", "question": "Task: ch13-radix-onepass-multiradix-single\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// ch13-radix-onepass-multiradix-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <stdint.h>\n\n// CONTRACT:\n// Implement one *stable* multiradix pass over keys.\n// - keys_d: input keys (length n)\n// - out_d: output keys (length n)\n// - n: number of elements\n// - r: bits per pass (1, 2, or 4)\n// - shift: bit shift for the digit (e.g., 0, r, 2r, ...)\n// Approach expected (typical):\n// 1) extract digits (0..(2^r - 1))\n// 2) per-block histogram -> global array [grid x buckets]\n// 3) host exclusive scan to get global bases & per-block bucket bases\n// 4) stable scatter into out_d using digit, globalBase[b], blockBase[block,b], and local offset within block\n// NOTE: Stability means equal digits preserve the original order.\n\nextern \"C\" void radix_onepass_multiradix(\n const uint32_t* keys_d, uint32_t* out_d,\n int n, int r, int shift);\n\n// TODO: provide your implementation\nextern \"C\" void radix_onepass_multiradix(\n const uint32_t* keys_d, uint32_t* out_d,\n int n, int r, int shift)\n{\n // Implement kernels + host prefix here.\n // You may choose blockDim=256 and compute grid from n.\n // (Any correct stable implementation passes.)\n (void)keys_d; (void)out_d; (void)n; (void)r; (void)shift;\n}\n```", "task_dir": "eval-tasks/ch13-radix-onepass-multiradix-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch14-spmv-csr-thread-per-row-single", "question": "Task: ch14-spmv-csr-thread-per-row-single\n Target file: student_kernel.cu\n\n Implementation requirements:\n- Assign one thread per row using a grid-stride loop.\n- Accumulate vals[j] * x[colIdx[j]] for j in [rowPtr[row], rowPtr[row+1]) and write y[row].\n- Avoid atomics and guard rows >= m before reading rowPtr[row+1].\n\n Here is the current student_kernel.cu:\n\n ```cuda\n // ch14-spmv-csr-thread-per-row-single / student_kernel.cu\n#include <cuda_runtime.h>\n\nextern \"C\" __global__\nvoid spmv_csr_kernel(const int* __restrict__ rowPtr,\n const int* __restrict__ colIdx,\n const float* __restrict__ vals,\n const float* __restrict__ x,\n float* __restrict__ y,\n int m)\n{\n // TODO: Assign one thread per row, iterate over the row's nonzeros, and\n // write y[row] without using atomics.\n\n int row = blockIdx.x * blockDim.x + threadIdx.x;\n if (row < m) {\n int start = rowPtr[row];\n int end = rowPtr[row + 1];\n float scratch = 0.0f;\n for (int j = start; j < end; ++j) {\n (void)colIdx[j];\n (void)vals[j];\n }\n if (y) {\n y[row] = scratch; // placeholder leaves output unchanged (all zeros)\n }\n }\n}\n ```", "task_dir": "eval-tasks/ch14-spmv-csr-thread-per-row-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch14-spmv-coo-single", "question": "Task: ch14-spmv-coo-single\n Target file: student_kernel.cu\n\n Implementation requirements:\n- Launch a grid-stride loop over nnz entries.\n- For each entry, accumulate with atomicAdd(&y[row], vals[k] * x[col]) after bounds checks.\n- Do not modify the input arrays.\n\n Here is the current student_kernel.cu:\n\n ```cuda\n // ch14-spmv-coo-single / student_kernel.cu\n#include <cuda_runtime.h>\n\nextern \"C\" __global__\nvoid spmv_coo_kernel(const int* __restrict__ row_idx,\n const int* __restrict__ col_idx,\n const float* __restrict__ vals,\n const float* __restrict__ x,\n float* __restrict__ y,\n int nnz)\n{\n // TODO: Implement COO-format SpMV using a grid-stride loop and atomicAdd on\n // y[row] to handle duplicate entries.\n\n int k = blockIdx.x * blockDim.x + threadIdx.x;\n if (k < nnz) {\n int r = row_idx[k];\n int c = col_idx[k];\n (void)r;\n (void)c;\n (void)vals[k];\n (void)x;\n (void)y;\n }\n}\n ```", "task_dir": "eval-tasks/ch14-spmv-coo-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch14-spmv-ell-single", "question": "Task: ch14-spmv-ell-single\n Target file: student_kernel.cu\n\n Implementation requirements:\n- Assign one thread per row using a grid-stride loop.\n- Skip padded slots with colIdx < 0 and accumulate remaining entries into y[row].\n- Initialize each row result before accumulation.\n\n Here is the current student_kernel.cu:\n\n ```cuda\n // ch14-spmv-ell-single / student_kernel.cu\n#include <cuda_runtime.h>\n\nextern \"C\" __global__\nvoid spmv_ell_kernel(const int* __restrict__ colIdx,\n const float* __restrict__ vals,\n const float* __restrict__ x,\n float* __restrict__ y,\n int m, int K)\n{\n // TODO: Assign one thread per row, iterate across the K padded entries,\n // skip negative column indices, and accumulate into y[row].\n //\n // DATA LAYOUT: Row-major ELL storage\n // For row i, the K entries are stored at indices [i*K, i*K+1, ..., i*K+K-1]\n // Example:\n // int base = row * K;\n // for (int t = 0; t < K; ++t) {\n // int col = colIdx[base + t];\n // if (col >= 0) {\n // // accumulate vals[base + t] * x[col]\n // }\n // }\n\n int row = blockIdx.x * blockDim.x + threadIdx.x;\n if (row < m) {\n (void)colIdx;\n (void)vals;\n (void)x;\n if (y) {\n y[row] = 0.0f;\n }\n }\n\n (void)K;\n}\n ```", "task_dir": "eval-tasks/ch14-spmv-ell-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch14-spmv-jds-single", "question": "Task: ch14-spmv-jds-single\n Target file: student_kernel.cu\n\n Implementation requirements:\n- Implement spmv_jds_kernel to walk jagged diagonals, checking if the current row participates before accumulating.\n- Write results back to original row order using the permutation array.\n- Implement spmv_jds host wrapper to configure and launch the kernel.\n\n Here is the current student_kernel.cu:\n\n ```cuda\n // ch14-spmv-jds-single / student_kernel.cu\n#include <cuda_runtime.h>\n\nextern \"C\" __global__\nvoid spmv_jds_kernel(const int* __restrict__ colJds,\n const float* __restrict__ valJds,\n const int* __restrict__ permute,\n const int* __restrict__ jdPtr,\n const float* __restrict__ x,\n float* __restrict__ y,\n int m, int maxJ)\n{\n // TODO: Traverse the jagged diagonals, accumulate per permuted row, and\n // write results back to the original ordering via permute[]\n if (colJds && valJds && permute && jdPtr && x && y) {\n (void)m;\n (void)maxJ;\n }\n}\n\nextern \"C\" void spmv_jds(const int* colJds, const float* valJds,\n const int* permute, const int* jdPtr,\n const float* x, float* y, int m, int maxJ)\n{\n (void)colJds; (void)valJds; (void)permute; (void)jdPtr;\n (void)x; (void)y; (void)m; (void)maxJ;\n\n // TODO: Configure launch dimensions and invoke spmv_jds_kernel.\n}\n ```", "task_dir": "eval-tasks/ch14-spmv-jds-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch14-spmv-hyb-single", "question": "Task: ch14-spmv-hyb-single\n Target file: student_kernel.cu\n\n Implementation requirements:\n- Implement spmv_ell_rows_kernel to compute ELL rows without atomics.\n- Implement spmv_coo_accum_kernel as a grid-stride loop that atomicAdds COO overflow contributions.\n- In spmv_hyb, launch the ELL kernel, synchronize, then launch the COO accumulation.\n\n Here is the current student_kernel.cu:\n\n ```cuda\n // ch14-spmv-hyb-single / student_kernel.cu\n#include <cuda_runtime.h>\n\nextern \"C\" __global__\nvoid spmv_ell_rows_kernel(const int* __restrict__ colEll,\n const float* __restrict__ valEll,\n const float* __restrict__ x,\n float* __restrict__ y,\n int m, int K)\n{\n // TODO: Compute the ELL portion of the HYB product row-by-row.\n //\n // DATA LAYOUT: Row-major ELL storage\n // For row i, the K entries are stored at indices [i*K, i*K+1, ..., i*K+K-1]\n // Example:\n // int row = blockIdx.x * blockDim.x + threadIdx.x;\n // if (row < m) {\n // float sum = 0.0f;\n // int base = row * K;\n // for (int t = 0; t < K; ++t) {\n // int col = colEll[base + t];\n // if (col >= 0) sum += valEll[base + t] * x[col];\n // }\n // y[row] = sum;\n // }\n if (colEll && valEll && x && y) {\n (void)m;\n (void)K;\n }\n}\n\nextern \"C\" __global__\nvoid spmv_coo_accum_kernel(const int* __restrict__ rowCoo,\n const int* __restrict__ colCoo,\n const float* __restrict__ valCoo,\n const float* __restrict__ x,\n float* __restrict__ y,\n int nnzC)\n{\n // TODO: Accumulate the COO tail using atomicAdd.\n if (rowCoo && colCoo && valCoo && x && y) {\n (void)nnzC;\n }\n}\n\nextern \"C\" void spmv_hyb(const int* colEll, const float* valEll, int m, int K,\n const int* rowCoo, const int* colCoo, const float* valCoo, int nnzC,\n const float* x, float* y)\n{\n (void)colEll; (void)valEll; (void)m; (void)K;\n (void)rowCoo; (void)colCoo; (void)valCoo; (void)nnzC;\n (void)x; (void)y;\n\n // TODO: Launch spmv_ell_rows_kernel followed by spmv_coo_accum_kernel to\n // form the full HYB SpMV result.\n}\n ```", "task_dir": "eval-tasks/ch14-spmv-hyb-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch14-coo-to-csr-single", "question": "Task: ch14-coo-to-csr-single\n Target file: student_kernel.cu\n\n Implementation requirements:\n- Build rowCounts on the device via a grid-stride histogram of row indices.\n- Perform an exclusive scan of rowCounts to obtain rowPtr and ensure rowPtr[m] == nnz.\n- Use a per-row cursor (rowNext) to scatter COO entries into CSR while preserving input order.\n- Handle empty rows and invalid rows defensively; do not combine duplicate entries.\n- Keep the input COO arrays unchanged and avoid out-of-bounds writes.\n\n Here is the current student_kernel.cu:\n\n ```cuda\n // ch14-coo-to-csr-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <cstdio>\n\n// Convert an unsorted COO triple (row/col/val) into CSR form. The reference\n// shows one correct solution; this stub leaves the heavy lifting to the model.\n\nextern \"C\" __global__\nvoid k_hist_rows(const int* __restrict__ row, int nnz, int m, int* __restrict__ rowCounts)\n{\n // TODO: Grid-stride histogram of row indices into rowCounts without\n // accessing out-of-range rows.\n\n if (blockIdx.x == 0 && threadIdx.x == 0 && nnz > 0 && m > 0) {\n atomicAdd(&rowCounts[0], 0);\n }\n}\n\nextern \"C\" __global__\nvoid k_stable_scatter_single(const int* __restrict__ row,\n const int* __restrict__ col,\n const float* __restrict__ val,\n int nnz, int m,\n int* __restrict__ rowNext,\n int* __restrict__ colCSR,\n float* __restrict__ valCSR)\n{\n // TODO: Walk COO entries in order, writing to CSR using per-row cursors.\n if (blockIdx.x == 0 && threadIdx.x == 0 && nnz > 0 && m > 0) {\n rowNext[0] += 0;\n colCSR[0] += 0;\n valCSR[0] += 0.0f;\n }\n}\n\nextern \"C\" void coo_to_csr(const int* d_row, const int* d_col, const float* d_val,\n int nnz, int m, int /*n*/,\n int* d_rowPtr, int* d_colCSR, float* d_valCSR)\n{\n // TODO: Allocate row counts, launch k_hist_rows, perform an exclusive scan\n // on the host (or device), then launch k_stable_scatter_single to emit the\n // CSR structure. Ensure rowPtr[m] == nnz and input ordering within each\n // row is preserved.\n\n if (m <= 0) {\n int zero = 0;\n cudaMemcpy(d_rowPtr, &zero, sizeof(int), cudaMemcpyHostToDevice);\n return;\n }\n\n // Placeholder touch so the stub compiles when invoked by tests.\n cudaMemset(d_colCSR, 0, nnz * sizeof(int));\n cudaMemset(d_valCSR, 0, nnz * sizeof(float));\n}\n ```", "task_dir": "eval-tasks/ch14-coo-to-csr-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch15-bfs-push-single", "question": "Task: ch15-bfs-push-single\n Target file: student_kernel.cu\n\n Implementation requirements:\n- Initialize levels to INF_LVL, set level[src] = 0, and seed the frontier array.\n- In each iteration, run a push kernel that traverses neighbors with atomicCAS discovery and atomicAdd enqueue.\n- Swap frontier buffers between iterations until the frontier is empty, then free temporary buffers.\n\n Here is the current student_kernel.cu:\n\n ```cuda\n // ch15-bfs-push-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <limits.h>\n\n#ifndef INF_LVL\n#define INF_LVL 0x3f3f3f3f\n#endif\n\n__global__ void _init_levels(int* __restrict__ level, int V, int src){\n int i = blockIdx.x * blockDim.x + threadIdx.x;\n if (i < V) {\n level[i] = INF_LVL;\n }\n if (src >= 0 && src < V && i == src) {\n level[i] = 0;\n }\n}\n\n__global__ void bfs_push_kernel(const int* __restrict__ row_ptr,\n const int* __restrict__ col_idx,\n const int* __restrict__ frontier,\n int frontier_size,\n int* __restrict__ next_frontier,\n int* __restrict__ next_frontier_size,\n int* __restrict__ level,\n int cur_level)\n{\n // TODO: Visit outgoing edges of the frontier, atomically claim newly\n // discovered vertices, and append them to next_frontier.\n if (row_ptr && col_idx && frontier && next_frontier && next_frontier_size && level) {\n (void)frontier_size;\n (void)cur_level;\n }\n}\n\nextern \"C\" void bfs_push_gpu(const int* d_row_ptr,\n const int* d_col_idx,\n int V, int E,\n int src,\n int* d_level)\n{\n (void)d_row_ptr;\n (void)d_col_idx;\n (void)E;\n\n if (V <= 0) {\n return;\n }\n\n dim3 b(256), g((V + b.x - 1) / b.x);\n _init_levels<<<g, b>>>(d_level, V, src);\n // TODO: Allocate frontier buffers, iterate until the frontier is empty, and\n // free any resources allocated on the device.\n}\n ```", "task_dir": "eval-tasks/ch15-bfs-push-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch15-bfs-pull-single", "question": "Task: ch15-bfs-pull-single\n Target file: student_kernel.cu\n\n Implementation requirements:\n- Initialize levels to INF_LVL, set level[src] = 0, and seed the in_frontier bitmap.\n- Each iteration clears out_frontier, runs a pull kernel that scans neighbors of undiscovered vertices, and counts discoveries.\n- Swap bitmaps when progress occurs and terminate when no new vertices are found.\n- Release all temporary device allocations at exit.\n\n Here is the current student_kernel.cu:\n\n ```cuda\n // ch15-bfs-pull-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <limits.h>\n\n#ifndef INF_LVL\n#define INF_LVL 0x3f3f3f3f\n#endif\n\n__global__ void _init_levels_pull(int* __restrict__ level, int V, int src){\n // TODO: Initialize the level array and set the source distance to zero.\n int i = blockIdx.x * blockDim.x + threadIdx.x;\n if (i < V) {\n level[i] = INF_LVL;\n }\n if (src >= 0 && src < V && i == src) {\n level[i] = 0;\n }\n}\n\n__global__ void _clear_bitmap(unsigned char* __restrict__ bm, int V){\n int i = blockIdx.x * blockDim.x + threadIdx.x;\n if (i < V) {\n bm[i] = 0;\n }\n}\n\n__global__ void _set_single(unsigned char* __restrict__ bm, int idx){\n if (threadIdx.x == 0 && blockIdx.x == 0 && idx >= 0) {\n bm[idx] = 1;\n }\n}\n\n__global__ void bfs_pull_kernel(const int* __restrict__ row_ptr,\n const int* __restrict__ col_idx,\n const unsigned char* __restrict__ in_frontier,\n unsigned char* __restrict__ out_frontier,\n int* __restrict__ level,\n int cur_level,\n int V,\n int* __restrict__ next_count)\n{\n // TODO: For undiscovered vertices, scan neighbors to detect if any reside\n // in the current frontier bitmap.\n int v = blockIdx.x * blockDim.x + threadIdx.x;\n if (v < V) {\n (void)row_ptr;\n (void)col_idx;\n (void)in_frontier;\n (void)out_frontier;\n (void)level;\n (void)cur_level;\n (void)next_count;\n }\n}\n\nextern \"C\" void bfs_pull_gpu(const int* d_row_ptr,\n const int* d_col_idx,\n int V, int E,\n int src,\n int* d_level)\n{\n (void)d_row_ptr;\n (void)d_col_idx;\n (void)E;\n\n if (V <= 0) {\n return;\n }\n\n dim3 b(256), g((V + b.x - 1) / b.x);\n _init_levels_pull<<<g, b>>>(d_level, V, src);\n // TODO: Allocate bitmaps, iterate levels until no new vertices are found.\n}\n ```", "task_dir": "eval-tasks/ch15-bfs-pull-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch15-bfs-direction-optimized-single", "question": "Task: ch15-bfs-direction-optimized-single\n Target file: student_kernel.cu\n\n Implementation requirements:\n- Initialize levels to INF_LVL (except src) and allocate frontier buffers plus a bitmap.\n- Implement push and pull kernels that discover neighbors with atomicCAS/bitmap checks.\n- Switch to pull mode when frontier_size > V/16 and back to push when frontier_size < V/64.\n- Loop until the frontier is empty, freeing temporary buffers afterwards.\n\n Here is the current student_kernel.cu:\n\n ```cuda\n // ch15-bfs-direction-optimized-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <limits.h>\n\n#ifndef INF_LVL\n#define INF_LVL 0x3f3f3f3f\n#endif\n\n__global__ void _init_levels(int* __restrict__ level, int V){\n // TODO: Initialize levels to INF_LVL.\n int i = blockIdx.x * blockDim.x + threadIdx.x;\n if (i < V) {\n level[i] = INF_LVL;\n }\n}\n\n__global__ void _clear_bitmap(unsigned char* __restrict__ bm, int V){\n // TODO: Zero bitmap entries in parallel.\n int i = blockIdx.x * blockDim.x + threadIdx.x;\n if (i < V) {\n bm[i] = 0;\n }\n}\n\n__global__ void _mark_bitmap_from_list(const int* __restrict__ list, int n,\n unsigned char* __restrict__ bm){\n // TODO: Mark current frontier vertices in the bitmap.\n int i = blockIdx.x * blockDim.x + threadIdx.x;\n if (i < n) {\n (void)list[i];\n (void)bm;\n }\n}\n\n__global__ void k_push(const int* __restrict__ row_ptr,\n const int* __restrict__ col_idx,\n const int* __restrict__ frontier,\n int frontier_size,\n int* __restrict__ next_frontier,\n int* __restrict__ next_size,\n int* __restrict__ level,\n int cur_level)\n{\n // TODO: Push-based relaxation using atomicCAS/atomicAdd.\n if (row_ptr && col_idx && frontier && next_frontier && next_size && level) {\n (void)frontier_size;\n (void)cur_level;\n }\n}\n\n__global__ void k_pull(const int* __restrict__ row_ptr,\n const int* __restrict__ col_idx,\n const unsigned char* __restrict__ in_frontier,\n int* __restrict__ next_frontier,\n int* __restrict__ next_size,\n int* __restrict__ level,\n int cur_level,\n int V)\n{\n // TODO: Pull-based relaxation that scans undiscovered vertices.\n if (row_ptr && col_idx && in_frontier && next_frontier && next_size && level) {\n (void)cur_level;\n (void)V;\n }\n}\n\nextern \"C\" void bfs_direction_optimized_gpu(const int* d_row_ptr,\n const int* d_col_idx,\n int V, int E,\n int src,\n int* d_level)\n{\n // TODO: Alternate between push and pull phases based on the frontier size\n // thresholds described in the README.\n if (V <= 0 || E < 0) {\n return;\n }\n\n dim3 b(256), g((V + b.x - 1) / b.x);\n _init_levels<<<g, b>>>(d_level, V);\n if (src >= 0 && src < V) {\n cudaMemcpy(d_level + src, &src, sizeof(int), cudaMemcpyHostToDevice);\n }\n}\n ```", "task_dir": "eval-tasks/ch15-bfs-direction-optimized-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch15-bfs-edge-centric-single", "question": "Task: ch15-bfs-edge-centric-single\n Target file: student_kernel.cu\n\n Implementation requirements:\n- Initialize levels to INF_LVL with level[src] = 0.\n- Optionally precompute an edge->vertex mapping for efficient edge traversal.\n- Iteratively launch an edge-centric kernel that marks newly discovered vertices and sets an activity flag.\n- Stop when no edge discovers new vertices; CSR arrays must remain unchanged.\n\n Here is the current student_kernel.cu:\n\n ```cuda\n // ch15-bfs-edge-centric-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <limits.h>\n\n#ifndef INF_LVL\n#define INF_LVL 0x3f3f3f3f\n#endif\n\n__global__ void _init_levels_edge(int* __restrict__ level, int V, int src){\n // TODO: Initialize the level array and seed the source vertex.\n int i = blockIdx.x * blockDim.x + threadIdx.x;\n if (i < V) {\n level[i] = INF_LVL;\n }\n if (i == src) {\n level[i] = 0;\n }\n}\n\n__global__ void bfs_edge_centric_kernel(const int* __restrict__ row_ptr,\n const int* __restrict__ col_idx,\n int* __restrict__ level,\n int cur_level,\n int E,\n int* __restrict__ active_found)\n{\n // TODO: Iterate one edge per thread, discover newly reached vertices, and\n // signal progress via active_found.\n if (blockIdx.x * blockDim.x + threadIdx.x < E) {\n (void)row_ptr;\n (void)col_idx;\n (void)level;\n (void)cur_level;\n (void)active_found;\n }\n}\n\nextern \"C\" void bfs_edge_centric_gpu(const int* d_row_ptr,\n const int* d_col_idx,\n int V, int E,\n int src,\n int* d_level)\n{\n // TODO: Alternate between edge-centric relaxations until no progress is\n // made. Maintain device-side flags and handle empty graphs gracefully.\n if (V <= 0 || E < 0) {\n return;\n }\n\n dim3 b(128);\n dim3 g((V + b.x - 1) / b.x);\n _init_levels_edge<<<g, b>>>(d_level, V, src);\n}\n ```", "task_dir": "eval-tasks/ch15-bfs-edge-centric-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch16-conv2d-forward-single", "question": "Task: ch16-conv2d-forward-single\n Target file: student_kernel.cu\n\n Implementation requirements:\n- Decode each thread id into (n, oc, oh, ow) for NCHW output layout.\n- Accumulate input[n,c,ih,iw] * weight[oc,c,kh,kw] over channels and kernel, skipping out-of-bounds taps.\n- Add bias[oc] if provided and write to output; guard threads with tid >= total outputs.\n\n Here is the current student_kernel.cu:\n\n ```cuda\n // ch16-conv2d-forward-single / student_kernel.cu\n#include <cuda_runtime.h>\n\nextern \"C\" __global__\nvoid conv2d_forward_kernel(\n const float* __restrict__ input,\n const float* __restrict__ weight,\n const float* __restrict__ bias,\n float* __restrict__ output,\n int N, int C, int H, int W,\n int OC, int kernel_h, int kernel_w,\n int stride_h, int stride_w,\n int out_h, int out_w)\n{\n (void)input;\n (void)weight;\n (void)bias;\n (void)N; (void)C; (void)H; (void)W;\n (void)OC; (void)kernel_h; (void)kernel_w;\n (void)stride_h; (void)stride_w;\n (void)out_h; (void)out_w;\n\n // TODO: Map each thread to a unique (n, oc, oh, ow) output element and\n // perform the convolution accumulation over the input tensor.\n\n int tid = blockIdx.x * blockDim.x + threadIdx.x;\n long long total = 1LL * N * OC * out_h * out_w;\n if ((long long)tid >= total) {\n return;\n }\n\n if (output) {\n output[tid] = 0.0f; // placeholder value so the stub compiles\n }\n}\n ```", "task_dir": "eval-tasks/ch16-conv2d-forward-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch16-maxpool2d-forward-single", "question": "Task: ch16-maxpool2d-forward-single\n Target file: student_kernel.cu\n\n Implementation requirements:\n- Assign one thread per output element (n, c, oh, ow).\n- Scan the pooling window, tracking both maximum value and argmax index (kh*kernel_w + kw).\n- Store the max into output and argmax into indices; initialize indices to -1.\n- Guard threads with tid >= total outputs.\n\n Here is the current student_kernel.cu:\n\n ```cuda\n // ch16-maxpool2d-forward-single / student_kernel.cu\n#include <cuda_runtime.h>\n\nextern \"C\" __global__\nvoid maxpool2d_forward_kernel(const float* input, float* output, int* indices,\n int batch_size, int channels,\n int height, int width,\n int kernel_h, int kernel_w,\n int stride_h, int stride_w,\n int out_h, int out_w)\n{\n (void)input;\n (void)batch_size; (void)channels;\n (void)height; (void)width;\n (void)kernel_h; (void)kernel_w;\n (void)stride_h; (void)stride_w;\n (void)out_h; (void)out_w;\n\n // TODO: For each thread, compute the maximum value within the pooling\n // window and track the argmax (flattened kernel index).\n\n int tid = blockIdx.x * blockDim.x + threadIdx.x;\n long long total = 1LL * batch_size * channels * out_h * out_w;\n if ((long long)tid >= total) {\n return;\n }\n\n if (output) {\n output[tid] = 0.0f;\n }\n if (indices) {\n indices[tid] = -1;\n }\n}\n ```", "task_dir": "eval-tasks/ch16-maxpool2d-forward-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch17-fhd-accumulate-single", "question": "Task: ch17-fhd-accumulate-single\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// ch17-fhd-accumulate-single/student_kernel.cu\n#include <cuda_runtime.h>\n#include <math_constants.h>\n\nextern \"C\" __global__\nvoid fhd_accumulate_kernel(const float* __restrict__ rPhi,\n const float* __restrict__ iPhi,\n const float* __restrict__ rD,\n const float* __restrict__ iD,\n const float* __restrict__ kx,\n const float* __restrict__ ky,\n const float* __restrict__ kz,\n const float* __restrict__ x,\n const float* __restrict__ y,\n const float* __restrict__ z,\n int M, int N,\n float* __restrict__ rFhD,\n float* __restrict__ iFhD)\n{\n // TODO:\n // - 1 thread per n (global id = blockIdx.x*blockDim.x + threadIdx.x)\n // - Guard: if (n >= N) return;\n // - Load x[n], y[n], z[n] into registers\n // - Accumulate over m=0..M-1:\n // rmu = rPhi[m]*rD[m] + iPhi[m]*iD[m]\n // imu = rPhi[m]*iD[m] - iPhi[m]*rD[m]\n // ang = 2*pi*(kx[m]*xn + ky[m]*yn + kz[m]*zn)\n // c = cosf(ang); s = sinf(ang)\n // r_acc += rmu*c - imu*s\n // i_acc += imu*c + rmu*s\n // - Finally: rFhD[n] += r_acc; iFhD[n] += i_acc;\n\n // TODO: Implement the FHD accumulation kernel here\n}\n```", "task_dir": "eval-tasks/ch17-fhd-accumulate-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch17-fhd-fission-two-kernels-single", "question": "Task: ch17-fhd-fission-two-kernels-single\nTarget file: student_kernel.cu\n\nGoal: Split the fissioned FHD computation into two kernels. First, compute the complex products `(rMu, iMu)` for every frequency `m`. Second, accumulate those precomputed values across all `m` for each spatial sample `n`, matching the fused reference implementation.\n\nKey requirements:\n- Keep the inputs read-only and accumulate results into `rFhD` / `iFhD` using `+=`.\n- Use the provided TWO_PI constant when computing `ang`.\n- Launch `compute_mu_kernel` with one thread per `m`; launch `fhd_accumulate_mu_kernel` with one thread per `n`.\n- Preserve the maths shown in the comments and guard index bounds.\n\nSkeleton:\n```cuda\n// ch17-fhd-fission-two-kernels-single/student_kernel.cu\n#include <cuda_runtime.h>\n\n// TODO (A): compute_mu_kernel\n// Each thread handles one m (if m < M):\n// rMu[m] = rPhi[m]*rD[m] + iPhi[m]*iD[m]\n// iMu[m] = rPhi[m]*iD[m] - iPhi[m]*rD[m]\nextern \"C\" __global__\nvoid compute_mu_kernel(const float* __restrict__ rPhi,\n const float* __restrict__ iPhi,\n const float* __restrict__ rD,\n const float* __restrict__ iD,\n int M,\n float* __restrict__ rMu,\n float* __restrict__ iMu)\n{\n // TODO: Implement complex multiplication for each m\n // Hint: int m = blockIdx.x * blockDim.x + threadIdx.x;\n}\n\n// TODO (B): fhd_accumulate_mu_kernel\n// One thread per n; loop over m using precomputed rMu/iMu.\n// Accumulate into rFhD[n], iFhD[n] using Fourier transform formula.\n//\n// IMPORTANT FORMULAS:\n// 1. Angle calculation must include TWO_PI (2pi):\n// const float TWO_PI = 6.2831853071795864769f;\n// float ang = TWO_PI * (kx[m]*xn + ky[m]*yn + kz[m]*zn);\n//\n// 2. Use ACCUMULATION (+=), not assignment (=), because this kernel may be\n// called multiple times:\n// rFhD[n] += r_acc; // NOT rFhD[n] = r_acc;\n// iFhD[n] += i_acc; // NOT iFhD[n] = i_acc;\n//\n// Algorithm:\n// int n = blockIdx.x * blockDim.x + threadIdx.x;\n// if (n < N) {\n// float r_acc = 0.0f, i_acc = 0.0f;\n// for (int m = 0; m < M; ++m) {\n// float ang = TWO_PI * (kx[m]*x[n] + ky[m]*y[n] + kz[m]*z[n]);\n// float c = cosf(ang), s = sinf(ang);\n// r_acc += rMu[m] * c - iMu[m] * s;\n// i_acc += iMu[m] * c + rMu[m] * s;\n// }\n// rFhD[n] += r_acc;\n// iFhD[n] += i_acc;\n// }\nextern \"C\" __global__\nvoid fhd_accumulate_mu_kernel(const float* __restrict__ rMu,\n const float* __restrict__ iMu,\n const float* __restrict__ kx,\n const float* __restrict__ ky,\n const float* __restrict__ kz,\n const float* __restrict__ x,\n const float* __restrict__ y,\n const float* __restrict__ z,\n int M, int N,\n float* __restrict__ rFhD,\n float* __restrict__ iFhD)\n{\n // TODO: Implement accumulation using precomputed rMu, iMu (see formula above)\n}\n```", "task_dir": "eval-tasks/ch17-fhd-fission-two-kernels-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch18-energy-scatter-single", "question": "Task: ch18-energy-scatter-single\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// ch18-energy-scatter-single / student_kernel.cu\n//\n// Implement Fig. 18.5 (SCATTER): one thread per atom, looping over all (i,j)\n// grid points on a fixed z-slice and ATOMICALLY accumulating into energygrid.\n//\n// CONTRACT\n// - Constant memory holds a *chunk* of atoms: __constant__ float atoms[CHUNK_SIZE*4]\n// as (x,y,z,charge) AoS, 4 floats per atom.\n// - The test harness uploads atoms chunk-by-chunk via cudaMemcpyToSymbol and then\n// launches the kernel once per chunk, accumulating into the same output slice.\n// - Kernel params:\n// energygrid : pointer to [grid.x * grid.y * grid.z] floats\n// grid : logical 3D grid dimensions (x,y,z) for indexing\n// gridspacing : spacing used to compute x = i*gridspacing, y = j*gridspacing\n// z : world-space z coordinate of the slice (must be multiple of gridspacing)\n// atoms_in_chunk: number of atoms loaded in constant memory for this launch (<= CHUNK_SIZE)\n// start_atom : global offset of first atom of the chunk (not needed for correct math;\n// included so your signature matches reference; you may ignore it)\n// - Must use atomicAdd on energygrid writes (scatter means threads collide on same cell).\n// - Bounds: guard i in [0,grid.x), j in [0,grid.y).\n//\n// HINTS\n// - Compute k = int(z / gridspacing) once per kernel; assume z aligns.\n// - Thread id tid covers one atom in [0, atoms_in_chunk).\n// - Load (ax,ay,az,q) from constant memory as atoms[4*tid + {0,1,2,3}].\n// - For each j row, precompute y and (y-ay), (z-az) and reuse (dy*dy + dz*dz).\n// - Use sqrtf for single-precision.\n// - Use 1D blocks and grids for atom threads (e.g., blockDim.x = 256).\n//\n// PERFORMANCE is not graded here-correctness, safety (no OOB), and atomicity are.\n\n#ifndef CHUNK_SIZE\n#define CHUNK_SIZE 256\n#endif\n\n__constant__ float atoms[CHUNK_SIZE * 4];\n\nextern \"C\" __global__\nvoid cenergyScatterKernel(float* __restrict__ energygrid,\n dim3 grid,\n float gridspacing,\n float z,\n int atoms_in_chunk,\n int /*start_atom_unused*/) {\n // TODO: Implement scatter kernel (one thread per atom)\n // 1. Get thread ID and bounds check\n // 2. Load atom data from constant memory\n // 3. Compute z-slice index k\n // 4. Loop over all (i,j) grid points in the slice\n // 5. For each grid point, compute distance and contribution\n // 6. Use atomicAdd to accumulate into energygrid\n}\n```", "task_dir": "eval-tasks/ch18-energy-scatter-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch18-energy-gather-single", "question": "Task: ch18-energy-gather-single\nTarget file: student_kernel.cu\n\nGoal: Implement the gather variant of the molecular energy kernel. Each thread owns one grid cell, loops over the atoms in the constant-memory chunk, accumulates a private sum, and writes exactly once to the output grid.\n\nKey requirements:\n- Use a 2D launch (threadIdx.x/y) to cover the grid tile and guard indices.\n- Loop over all atoms in the uploaded chunk and accumulate into a private `energy` value.\n- Write exactly one `+=` update per grid cell (no atomics).\n- Follow the provided formulas for indexing and distance accumulation.\n\nSkeleton:\n```cuda\n// ch18-energy-gather-single / student_kernel.cu\n//\n// Implement Fig. 18.6 (GATHER): one thread per grid point (on a fixed z-slice).\n// Each thread loops over all atoms in the current constant-memory chunk and\n// accumulates a private sum, then writes exactly once to energygrid (+=).\n//\n// CONTRACT\n// - Constant memory holds a *chunk* of atoms: __constant__ float atoms[CHUNK_SIZE*4].\n// - Kernel params:\n// energygrid : pointer to [grid.x * grid.y * grid.z] floats\n// grid : logical 3D grid dimensions (x,y,z) for indexing\n// gridspacing : spacing for x=i*h, y=j*h\n// z : world-space z of the slice\n// atoms_in_chunk: number of atoms currently loaded (<= CHUNK_SIZE)\n// start_atom : global offset of first atom in the chunk (not needed here)\n// - NO atomics: each thread owns its output cell and does `energygrid[idx] += local_sum`.\n// - 2D launch is expected (e.g., block=(16,16)).\n//\n// HINTS\n// - Compute (dy*dy + dz*dz) per row and reuse where reasonable.\n// - Use sqrtf and a small denominator clamp to avoid division by zero.\n\n#ifndef CHUNK_SIZE\n#define CHUNK_SIZE 256\n#endif\n\n__constant__ float atoms[CHUNK_SIZE * 4];\n\nextern \"C\" __global__\nvoid cenergyGatherKernel(float* __restrict__ energygrid,\n dim3 grid,\n float gridspacing,\n float z,\n int atoms_in_chunk,\n int /*start_atom_unused*/) {\n // TODO: Implement gather kernel (one thread per grid cell)\n // 1. Get 2D thread indices (i,j) for grid position\n // 2. Bounds check against grid dimensions (grid.x, grid.y)\n // 3. Compute z-slice index and validate:\n // int k = int(z / gridspacing);\n // if (k < 0 || k >= (int)grid.z) return;\n // 4. Compute world-space coordinates (x = i*gridspacing, y = j*gridspacing)\n // 5. Loop over all atoms in the current chunk\n // 6. For each atom, compute distance and contribution\n // 7. Compute 3D linearized index into energygrid:\n // size_t idx = (size_t)grid.x * grid.y * k + (size_t)grid.x * j + (size_t)i;\n // 8. Accumulate private sum, then write once: energygrid[idx] += sum\n}\n```", "task_dir": "eval-tasks/ch18-energy-gather-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch18-energy-gather-coarsened-single", "question": "Task: ch18-energy-gather-coarsened-single\n Target file: student_kernel.cu\n\n Implementation requirements:\n- Compute base_i and j from block/thread indices and determine k from z/gridspacing.\n- Maintain a register array of size COARSEN_FACTOR to accumulate per-thread contributions.\n- Loop over atoms_in_chunk, accumulating q / max(distance, 1e-12f) for owned cells.\n- Write each owned grid cell exactly once without atomics, guarding grid bounds.\n\n Here is the current student_kernel.cu:\n\n ```cuda\n // ch18-energy-gather-coarsened-single / student_kernel.cu\n//\n// Implement a coarsened gather kernel (Fig. 18.8). The reference solution\n// shows the full algorithm; here we deliberately leave the core computation as\n// a TODO so the student model must fill it in.\n\n#ifndef CHUNK_SIZE\n#define CHUNK_SIZE 256\n#endif\n#ifndef COARSEN_FACTOR\n#define COARSEN_FACTOR 8\n#endif\n\n__constant__ float atoms[CHUNK_SIZE * 4];\n\nextern \"C\" __global__\nvoid cenergyCoarsenKernel(float* __restrict__ energygrid,\n dim3 grid,\n float gridspacing,\n float z,\n int /*atoms_in_chunk*/,\n int /*start_atom_unused*/) {\n int base_i = blockIdx.x * (blockDim.x * COARSEN_FACTOR) + threadIdx.x * COARSEN_FACTOR;\n int j = blockIdx.y * blockDim.y + threadIdx.y;\n\n if (j < 0 || j >= (int)grid.y) {\n return;\n }\n\n int k = int(z / gridspacing);\n if (k < 0 || k >= (int)grid.z) {\n return;\n }\n\n // TODO: Implement thread-coarsened energy accumulation:\n // - Each thread processes COARSEN_FACTOR grid points (base_i to base_i + COARSEN_FACTOR - 1)\n // - For each grid point (i, j, k), accumulate energy contributions from all atoms in constant memory\n // - Atoms are stored in constant memory as [x, y, z, charge] in structure-of-arrays layout\n // - Compute distance from each atom to grid point: dx, dy, dz\n // - Energy contribution: charge / sqrt(dx^2 + dy^2 + dz^2)\n // - Add accumulated energy to energygrid[idx] (no atomics needed - each thread owns its points)\n // - Use proper bounds checking for i in [0, grid.x)\n //\n // Hints:\n // - Grid point coordinates: x = i * gridspacing, y = j * gridspacing, z already given\n // - Atom coordinates: atoms[a*4+0], atoms[a*4+1], atoms[a*4+2], atoms[a*4+3] (charge)\n // - 3D array index: idx = k * grid.x * grid.y + j * grid.x + i\n}\n ```", "task_dir": "eval-tasks/ch18-energy-gather-coarsened-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch18-energy-gather-coarsened-coalesced-single", "question": "Task: ch18-energy-gather-coarsened-coalesced-single\n Target file: student_kernel.cu\n\n Implementation requirements:\n- Compute base_i, j, and k, and use a register array of length COARSEN_FACTOR to accumulate contributions.\n- Stage per-thread results into shared memory for coalesced writes.\n- After accumulation, cooperatively flush the shared buffer back to global memory with strided writes.\n- Avoid atomics and guard all grid bounds.\n\n Here is the current student_kernel.cu:\n\n ```cuda\n // ch18-energy-gather-coarsened-coalesced-single / student_kernel.cu\n//\n// Implement a coarsened gather kernel that stages results in shared memory and\n// flushes them with coalesced writes. This starter file intentionally leaves the\n// body empty so the model must supply the full algorithm.\n\n#ifndef CHUNK_SIZE\n#define CHUNK_SIZE 256\n#endif\n#ifndef COARSEN_FACTOR\n#define COARSEN_FACTOR 8\n#endif\n\n__constant__ float atoms[CHUNK_SIZE * 4];\n\nextern \"C\" __global__\nvoid cenergyCoarsenCoalescedKernel(float* __restrict__ energygrid,\n dim3 grid,\n float gridspacing,\n float z,\n int atoms_in_chunk,\n int /*start_atom_unused*/) {\n int base_i = blockIdx.x * (blockDim.x * COARSEN_FACTOR) + threadIdx.x * COARSEN_FACTOR;\n int j = blockIdx.y * blockDim.y + threadIdx.y;\n\n if (j < 0 || j >= (int)grid.y) {\n return;\n }\n\n int k = int(z / gridspacing);\n if (k < 0 || k >= (int)grid.z) {\n return;\n }\n\n // TODO: Implement coarsened energy accumulation with shared memory and coalesced writes:\n //\n // IMPORTANT: This kernel is called multiple times (once per atom chunk).\n // You MUST use += to accumulate results, NOT = to overwrite!\n //\n // Step 1: Declare shared memory buffer\n // - Size: blockDim.x * blockDim.y * COARSEN_FACTOR floats\n // - Layout: [threadIdx.y][threadIdx.x * COARSEN_FACTOR + c] for coalesced access\n //\n // Step 2: Accumulate energy for COARSEN_FACTOR grid points\n // - For each c in [0, COARSEN_FACTOR), compute i = base_i + c\n // - For each grid point (i, j, k), loop over atoms_in_chunk atoms (NOT CHUNK_SIZE!)\n // - Compute distance and energy contribution (charge / distance)\n // - Store accumulated energy in shared memory (NOT directly to global memory yet)\n //\n // Step 3: Synchronize threads\n // - __syncthreads() to ensure all threads finished computation\n //\n // Step 4: Coalesced write to global memory with ACCUMULATION\n // - Reorganize shared memory data to enable coalesced writes\n // - Each thread writes COARSEN_FACTOR consecutive values from shared memory\n // - USE += NOT =: energygrid[idx] += value (kernel called multiple times!)\n // - This ensures warps write contiguous memory locations (coalesced access)\n //\n // Hints:\n // - Shared memory avoids uncoalesced scattered writes\n // - Atoms layout: atoms[a*4+0], atoms[a*4+1], atoms[a*4+2] (x,y,z), atoms[a*4+3] (charge)\n // - Grid coordinates: x = i * gridspacing, y = j * gridspacing, z given\n // - Global index: idx = k * grid.x * grid.y + j * grid.x + i\n}\n ```", "task_dir": "eval-tasks/ch18-energy-gather-coarsened-coalesced-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch20-stencil-25pt-single-gpu-single", "question": "Task: ch20-stencil-25pt-single-gpu-single\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n#include <cuda_runtime.h>\n\n// TODO: Implement a single-GPU axis-aligned 25-point stencil with radius R=4.\n// Contract:\n// - Input/Output are dense 3D grids (dimx*dimy*dimz), row-major:\n// idx(i,j,k) = (k*dimy + j)*dimx + i\n// - For interior cells (i,j,k [4 .. dim-1-4]) compute:\n// out = w0*center + _{d=1..4} w[d] * (d along x + d along y + d along z)\n// with weights: w0=0.5, w1=0.10, w2=0.05, w3=0.025, w4=0.0125\n// - Boundary cells (within 4 of any face) must be copy-through: out=in\n// - Reasonable 3D launch config and synchronization\n//\n// Suggested steps:\n// 1) write idx3 helper\n// 2) in-kernel boundary test => copy-through\n// 3) accumulate using a small unrolled loop d=1..4\n// 4) host wrapper launches kernel\n\nstatic inline __device__ size_t idx3(int i,int j,int k,int dx,int dy){\n return (size_t(k)*dy + j)*dx + i;\n}\n\n__global__ void stencil25_kernel(const float* __restrict__ in,\n float* __restrict__ out,\n int dimx, int dimy, int dimz)\n{\n // TODO\n}\n\nextern \"C\" void stencil25_single_gpu(const float* d_in, float* d_out,\n int dimx, int dimy, int dimz)\n{\n // TODO: choose block/grid and launch kernel, then cudaDeviceSynchronize()\n}\n```", "task_dir": "eval-tasks/ch20-stencil-25pt-single-gpu-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch20-stencil-25pt-slab-stage1-boundary", "question": "Task: ch20-stencil-25pt-slab-stage1-boundary\nTarget file: student_kernel.cu\n\nGoal: Update only the Stage-1 boundary planes (the first and last four owned slices) of a 3D 25-point stencil on a slab with halo padding. Leave interior planes untouched; rely on halos that already exist for this phase.\n\nKey requirements:\n- Operate on input `d_in` / output `d_out` arranged with four halo planes on each side (`owned z [4 .. 4+dimz-1]`).\n- Compute stencil weights using the provided coefficients for 1..4 offsets in x, y, and z.\n- When i or j are within the halo distance, copy-through rather than reading out-of-bounds.\n- Do not modify interior or halo planes; only Stage-1 boundary planes should be written.\n\nSkeleton:\n```cuda\n#include <cuda_runtime.h>\n\n// TODO: Implement Stage-1 boundary update for axis-aligned 25-point stencil (R=4)\n// Local z layout has halos: total_z = dimz + 8\n// Owned region z [4 .. 4+dimz-1]\n// Stage-1 planes: [4..7] and [4+dimz-4 .. 4+dimz-1]\n// Within Stage-1 planes:\n// - For i/j interior (>=4 and <dim-4) compute stencil\n// - For i/j near faces, copy-through\n// Do not write interior planes or halos.\n//\n// STENCIL WEIGHTS: Use distance-based weighted stencil (NOT simple averaging!)\n// - w0 = 0.5 (center point)\n// - w1 = 0.10 (distance 1 neighbors: 1 in each axis)\n// - w2 = 0.05 (distance 2 neighbors: 2 in each axis)\n// - w3 = 0.025 (distance 3 neighbors: 3 in each axis)\n// - w4 = 0.0125 (distance 4 neighbors: 4 in each axis)\n//\n// Formula: out[i,j,k] = w0 * in[i,j,k]\n// + w1 * (in[i1,j,k] + in[i,j1,k] + in[i,j,k1])\n// + w2 * (in[i2,j,k] + in[i,j2,k] + in[i,j,k2])\n// + w3 * (in[i3,j,k] + in[i,j3,k] + in[i,j,k3])\n// + w4 * (in[i4,j,k] + in[i,j4,k] + in[i,j,k4])\n\nstatic inline __device__ size_t idx3(int i,int j,int k,int dx,int dy){\n return (size_t(k)*dy + j)*dx + i;\n}\n\n__global__ void stencil25_stage1_kernel(const float* __restrict__ in,\n float* __restrict__ out,\n int dimx,int dimy,int dimz)\n{\n // TODO\n}\n\nextern \"C\" void stencil25_stage1_boundary(const float* d_in, float* d_out,\n int dimx,int dimy,int dimz)\n{\n // TODO: launch 3D grid over local z [0 .. dimz+7]\n}\n```", "task_dir": "eval-tasks/ch20-stencil-25pt-slab-stage1-boundary", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch20-mpi-halo-pack-unpack", "question": "Task: ch20-mpi-halo-pack-unpack\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n#include <cuda_runtime.h>\n\n// TODOs:\n// Implement two host wrappers that launch simple 2D grids:\n//\n// 1) halo_pack_boundaries(d_grid, dimx,dimy,dimz, d_left_send, d_right_send)\n// - Read 4 owned boundary planes and pack into left/right buffers.\n// Owned z range is [4 .. 4+dimz-1]. Pack planes:\n// left : k = 4 + p, p [0..3]\n// right: k = (4+dimz-4) + p, p [0..3]\n// - Packed layout is plane-major then row-major:\n// pack_idx(p,j,i) = (p*dimy + j)*dimx + i\n//\n// 2) halo_unpack_to_halos(d_grid, dimx,dimy,dimz, d_left_recv, d_right_recv)\n// - Write left_recv to left halo k = 0..3 (k = 0 + p)\n// - Write right_recv to right halo k = dimz+4 .. dimz+7 (k = dimz+4 + p)\n// - Same pack_idx layout for sources.\n//\n// Keep the rest of the grid untouched.\n\nstatic inline __device__ size_t idx3(int i,int j,int k,int dx,int dy){\n return (size_t(k)*dy + j)*dx + i;\n}\nstatic inline __device__ size_t pack_idx(int p,int j,int i,int dx,int dy){\n return (size_t(p)*dy + j)*dx + i;\n}\n\n__global__ void k_pack_student(const float* __restrict__ grid,\n int dimx,int dimy,int dimz,\n float* __restrict__ left_send,\n float* __restrict__ right_send)\n{\n // TODO: implement (mirror reference description above)\n}\n\n__global__ void k_unpack_student(float* __restrict__ grid,\n int dimx,int dimy,int dimz,\n const float* __restrict__ left_recv,\n const float* __restrict__ right_recv)\n{\n // TODO: implement (mirror reference description above)\n}\n\nextern \"C\" void halo_pack_boundaries(const float* d_grid,\n int dimx,int dimy,int dimz,\n float* d_left_send,\n float* d_right_send)\n{\n // TODO: choose block(16,16) grid(ceil) and launch k_pack_student, then sync\n}\n\nextern \"C\" void halo_unpack_to_halos(float* d_grid,\n int dimx,int dimy,int dimz,\n const float* d_left_recv,\n const float* d_right_recv)\n{\n // TODO: choose block(16,16) grid(ceil) and launch k_unpack_student, then sync\n}\n```", "task_dir": "eval-tasks/ch20-mpi-halo-pack-unpack", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch20-stencil-25pt-slab-stage2-interior", "question": "Task: ch20-stencil-25pt-slab-stage2-interior\nTarget file: student_kernel.cu\n\nGoal: Implement the Stage-2 interior pass for the 3D 25-point stencil. This pass updates only the interior owned planes (excluding the four boundary planes already handled in Stage-1).\n\nKey requirements:\n- Owned region indices: `[4 .. 4+dimz-1]`; Stage-2 updates planes `k [8 .. 4+dimz-5]`.\n- Use the same stencil weights (w0..w4) applied to 1..4 offsets in each dimension.\n- Respect copy-through behaviour near x/y faces inside the interior range.\n- Write nothing outside the designated interior planes.\n\nSkeleton:\n```cuda\n#include <cuda_runtime.h>\n\n// TODO: Implement Stage-2 interior update for the 25-point stencil (R=4).\n// Local z extent = dimz + 8. Owned z = [4 .. 4+dimz-1].\n// Stage-2 interior planes: k [8 .. (4+dimz-1)-4].\n// For i/j edges (i<4 || i>=dimx-4 || j<4 || j>=dimy-4) copy-through.\n// Do not touch halos or Stage-1 planes.\n//\n// STENCIL WEIGHTS: Use distance-based weighted stencil (NOT simple averaging!)\n// - w0 = 0.5 (center point)\n// - w1 = 0.10 (distance 1 neighbors: 1 in each axis)\n// - w2 = 0.05 (distance 2 neighbors: 2 in each axis)\n// - w3 = 0.025 (distance 3 neighbors: 3 in each axis)\n// - w4 = 0.0125 (distance 4 neighbors: 4 in each axis)\n//\n// Formula: out[i,j,k] = w0 * in[i,j,k]\n// + w1 * (in[i1,j,k] + in[i,j1,k] + in[i,j,k1])\n// + w2 * (in[i2,j,k] + in[i,j2,k] + in[i,j,k2])\n// + w3 * (in[i3,j,k] + in[i,j3,k] + in[i,j,k3])\n// + w4 * (in[i4,j,k] + in[i,j4,k] + in[i,j,k4])\n\nstatic inline __device__ size_t idx3(int i,int j,int k,int dx,int dy){\n return (size_t(k)*dy + j)*dx + i;\n}\n\n__global__ void stencil25_stage2_kernel(const float* __restrict__ in,\n float* __restrict__ out,\n int dimx,int dimy,int dimz)\n{\n // TODO\n}\n\nextern \"C\" void stencil25_stage2_interior(const float* d_in, float* d_out,\n int dimx,int dimy,int dimz)\n{\n // TODO: launch 3D grid and synchronize\n}\n```", "task_dir": "eval-tasks/ch20-stencil-25pt-slab-stage2-interior", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch20-mpi-stencil-pipeline-naive", "question": "Task: ch20-mpi-stencil-pipeline-naive\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n#include <cuda_runtime.h>\n#include <vector>\n#include <cassert>\n#include <cstdio>\n\n// ====== Utilities ======\nstatic inline __host__ __device__\nsize_t idx3(int i,int j,int k,int dx,int dy){ return (size_t(k)*dy + j)*dx + i; }\nstatic inline void ck(cudaError_t e,const char* m){ if(e!=cudaSuccess){fprintf(stderr,\"CUDA %s: %s\\n\",m,cudaGetErrorString(e)); std::exit(2);} }\n\n// ====== Provided stencil kernels & pack/unpack launchers ======\nextern \"C\" void stencil25_stage1_boundary(const float* d_in, float* d_out,\n int dimx,int dimy,int dz_local,\n int z_global_beg, int dimz_total);\nextern \"C\" void stencil25_stage2_interior(const float* d_in, float* d_out,\n int dimx,int dimy,int dz_local);\nextern \"C\" void halo_pack_boundaries(const float* d_slab_out,\n int dimx,int dimy,int dz_local,\n float* d_left_send, float* d_right_send);\nextern \"C\" void halo_unpack_to_halos(float* d_slab_out,\n int dimx,int dimy,int dz_local,\n const float* d_left_recv, const float* d_right_recv);\n\n// ====== Small helpers to scatter/gather between full & slab memory ======\n__global__ void k_scatter_from_full(const float* __restrict__ d_in_full,\n float* __restrict__ d_slab_in,\n int dimx,int dimy,int z0,int dz)\n{\n int i=blockIdx.x*blockDim.x+threadIdx.x;\n int j=blockIdx.y*blockDim.y+threadIdx.y;\n int t=blockIdx.z*blockDim.z+threadIdx.z; // local owned z [0..dz-1]\n if(i>=dimx||j>=dimy||t>=dz) return;\n int k_local = 4 + t;\n int k_full = z0 + t;\n d_slab_in[idx3(i,j,k_local,dimx,dimy)] =\n d_in_full[idx3(i,j,k_full,dimx,dimy)];\n}\n\n__global__ void k_gather_to_full(const float* __restrict__ d_slab_out,\n float* __restrict__ d_out_full,\n int dimx,int dimy,int z0,int dz)\n{\n int i=blockIdx.x*blockDim.x+threadIdx.x;\n int j=blockIdx.y*blockDim.y+threadIdx.y;\n int t=blockIdx.z*blockDim.z+threadIdx.z; // local owned z [0..dz-1]\n if(i>=dimx||j>=dimy||t>=dz) return;\n int k_local = 4 + t;\n int k_full = z0 + t;\n d_out_full[idx3(i,j,k_full,dimx,dimy)] =\n d_slab_out[idx3(i,j,k_local,dimx,dimy)];\n}\n\n// ====== YOUR TASK ======\nextern \"C\" void mpi_stencil_pipeline_naive(const float* d_in_full,\n float* d_out_full,\n int dimx,int dimy,int dimz_total,\n int procs)\n{\n // TODO: Implement naive MPI-style pipeline for multi-slab 25-point stencil:\n //\n // Requirements:\n // - Partition z-dimension into `procs` slabs (each slab owns dimz_total/procs planes)\n // - Each slab needs halo regions (4 planes on each side for R=4 stencil)\n // - Allocate per-slab buffers with halo space: dimx * dimy * (local_dz + 8)\n // - Scatter input data from full array to per-slab buffers (use k_scatter_from_full kernel)\n // - Compute boundary planes with stage1 kernel (updates 4 planes on each end)\n // - Pack boundary data and exchange halos between neighboring slabs (simulate MPI with cudaMemcpy)\n // - Unpack received halo data into neighbor regions\n // - Compute interior planes with stage2 kernel\n // - Gather results from per-slab buffers back to full array (use k_gather_to_full kernel)\n // - Free all allocated buffers\n //\n // Hints:\n // - Use provided helper kernels: k_scatter_from_full, k_gather_to_full\n // - Use provided packing functions: halo_pack_boundaries, halo_unpack_to_halos\n // - Stage1 kernel handles boundary planes, Stage2 handles interior\n // - Halo exchange is device-to-device 
cudaMemcpy (simulates MPI)\n (void)d_in_full; (void)d_out_full; (void)dimx; (void)dimy; (void)dimz_total; (void)procs;\n}```", "task_dir": "eval-tasks/ch20-mpi-stencil-pipeline-naive", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch20-mpi-stencil-pipeline-cudaaware", "question": "Task: ch20-mpi-stencil-pipeline-cudaaware\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n#include <cuda_runtime.h>\n#include <vector>\n#include <cassert>\n#include <cstdio>\n\nstatic inline __host__ __device__\nsize_t idx3(int i,int j,int k,int dx,int dy){ return (size_t(k)*dy + j)*dx + i; }\nstatic void ck(cudaError_t e,const char* m){ if(e!=cudaSuccess){fprintf(stderr,\"CUDA %s: %s\\n\",m,cudaGetErrorString(e)); std::exit(2);} }\n\n// Stage 1/2, pack/unpack launchers (provided)\nextern \"C\" void stencil25_stage1_boundary(const float* d_in, float* d_out,\n int dimx,int dimy,int dz_local,\n int z_global_beg, int dimz_total);\nextern \"C\" void stencil25_stage2_interior(const float* d_in, float* d_out,\n int dimx,int dimy,int dz_local);\nextern \"C\" void halo_pack_boundaries(const float* d_slab_out,\n int dimx,int dimy,int dz_local,\n float* d_left_send, float* d_right_send);\nextern \"C\" void halo_unpack_to_halos(float* d_slab_out,\n int dimx,int dimy,int dz_local,\n const float* d_left_recv, const float* d_right_recv);\n\n// CUDA-aware sendrecv wrapper (device->device)\nextern \"C\" void mpi_cudaaware_sendrecv_device(const float* d_sendbuf, int sendcount,\n float* d_recvbuf, int recvcount);\n\n// scatter/gather kernels (provided)\n__global__ void k_scatter_from_full(const float* __restrict__ d_in_full,\n float* __restrict__ d_slab_in,\n int dimx,int dimy,int z0,int dz)\n{\n int i=blockIdx.x*blockDim.x+threadIdx.x;\n int j=blockIdx.y*blockDim.y+threadIdx.y;\n int t=blockIdx.z*blockDim.z+threadIdx.z;\n if(i>=dimx||j>=dimy||t>=dz) return;\n int k_local = 4 + t;\n int k_full = z0 + t;\n d_slab_in[idx3(i,j,k_local,dimx,dimy)] =\n d_in_full[idx3(i,j,k_full,dimx,dimy)];\n}\n__global__ void k_gather_to_full(const float* __restrict__ d_slab_out,\n float* __restrict__ d_out_full,\n int dimx,int dimy,int z0,int dz)\n{\n int i=blockIdx.x*blockDim.x+threadIdx.x;\n int j=blockIdx.y*blockDim.y+threadIdx.y;\n int t=blockIdx.z*blockDim.z+threadIdx.z;\n if(i>=dimx||j>=dimy||t>=dz) return;\n int k_local = 4 + t;\n int k_full = z0 + t;\n d_out_full[idx3(i,j,k_full,dimx,dimy)] =\n d_slab_out[idx3(i,j,k_local,dimx,dimy)];\n}\n\nextern \"C\" void mpi_stencil_pipeline_cudaaware(const float* d_in_full,\n float* d_out_full,\n int dimx,int dimy,int dimz_total,\n int procs)\n{\n // TODO: Implement CUDA-aware MPI pipeline for multi-slab 25-point stencil:\n //\n // Requirements:\n // - Partition z-dimension into `procs` slabs (each owns dimz_total/procs planes)\n // - Each slab needs halo regions (4 planes on each side for R=4 stencil)\n // - Allocate per-slab buffers with halo space and exchange buffers\n // - Scatter input data from full array to per-slab buffers (use k_scatter_from_full)\n // - Compute boundary planes with stage1 kernel\n // - Pack boundary data for halo exchange\n // - Use CUDA-aware MPI to exchange halos directly between GPU buffers (mpi_cudaaware_sendrecv_device)\n // - Unpack received halo data into neighbor regions\n // - Compute interior planes with stage2 kernel\n // - Gather results back to full array (use k_gather_to_full)\n // - Free all allocated buffers\n //\n // Hints:\n // - CUDA-aware MPI allows direct GPU-to-GPU transfers without host staging\n // - Use provided helper kernels: k_scatter_from_full, k_gather_to_full\n // - Use provided packing functions: halo_pack_boundaries, halo_unpack_to_halos\n // - mpi_cudaaware_sendrecv_device handles bidirectional 
exchange between neighbors\n (void)d_in_full; (void)d_out_full; (void)dimx; (void)dimy; (void)dimz_total; (void)procs;\n}```", "task_dir": "eval-tasks/ch20-mpi-stencil-pipeline-cudaaware", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch21-bezier-dp-parent-child-single", "question": "Task: ch21-bezier-dp-parent-child-single\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// ch21-bezier-dp-parent-child-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <stdint.h>\n\n// Use CUDA builtin float2\n#include <vector_types.h>\n\nstruct BezierLine {\n float2 CP[3]; // P0, P1, P2\n float2* vertexPos; // device buffer (allocated in parent via device malloc)\n int nVertices; // chosen per line in parent\n};\n\n// --- You implement these -----------------------------------------------------\n\n// Geometric curvature proxy: distance from P1 to line P0-P2 (normalized by |P2-P0|)\n// Return non-negative curvature (0 for degenerate segment).\n__device__ float curvature_of(const float2 P0, const float2 P1, const float2 P2) {\n // TODO: implement robust point-to-segment distance proxy.\n // Hints:\n // v = P2 - P0\n // w = P1 - P0\n // area2 = |v.x*w.y - v.y*w.x| (2x triangle area)\n // base = sqrt(v.x*v.x + v.y*v.y)\n // curvature ~ area2 / max(base, 1e-8)\n return 0.0f; // TODO\n}\n\n// Child kernel: compute tessellated positions for one line lidx.\n__global__ void computeBezierLine_child(int lidx, BezierLine* bLines, int nTess) {\n // TODO:\n // - idx = blockIdx.x*blockDim.x + threadIdx.x\n // - if idx >= nTess: return\n // - u = idx / (nTess-1) (float)\n // - B0=(1-u)^2, B1=2u(1-u), B2=u^2\n // - position = B0*P0 + B1*P1 + B2*P2\n // - write to bLines[lidx].vertexPos[idx]\n}\n\n// Parent kernel: choose tessellation density, allocate vertex buffers, launch child.\n__global__ void computeBezierLines_parent(BezierLine* bLines, int nLines, int maxTess) {\n // TODO:\n // - lidx = blockIdx.x*blockDim.x + threadIdx.x; if (lidx>=nLines) return\n // - compute curvature_of(...)\n // - nVerts = clamp( int(curv*16.f)+4, 4, maxTess );\n // - bLines[lidx].nVertices = nVerts;\n // - bLines[lidx].vertexPos = (float2*)malloc(nVerts * sizeof(float2));\n // * if malloc returns nullptr, set nVertices=0 and return.\n // - launch child: <<< (nVerts+31)/32, 32 >>> computeBezierLine_child(lidx, bLines, nVerts);\n}\n```", "task_dir": "eval-tasks/ch21-bezier-dp-parent-child-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch21-bezier-dp-free-child-buffers", "question": "Task: ch21-bezier-dp-free-child-buffers\nTarget file: student_kernel.cu\n\nHere is the current skeleton you must complete:\n\n```cuda\n// ch21-bezier-dp-free-child-buffers / student_kernel.cu\n#include <cuda_runtime.h>\n#include <stdint.h>\n\n#include <vector_types.h>\n\nstruct BezierLine {\n float2 CP[3];\n float2* vertexPos; // device-heap pointer\n int nVertices;\n};\n\n// TODO: Implement idempotent free:\n// - lidx = blockIdx.x*blockDim.x + threadIdx.x; if (lidx>=nLines) return;\n// - if (bLines[lidx].vertexPos != nullptr) { free(ptr); bLines[lidx].vertexPos = nullptr; }\n// - (optional) bLines[lidx].nVertices = 0;\n__global__ void freeVertexMem(BezierLine* bLines, int nLines) {\n // TODO\n}\n```", "task_dir": "eval-tasks/ch21-bezier-dp-free-child-buffers", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch21-quadtree-dp-build-single", "question": "Task: ch21-quadtree-dp-build-single\n Target file: student_kernel.cu\n\n Implementation requirements:\n- quadtree_build_parent should create a QuadWork covering all points and launch quadtree_node.\n- quadtree_node must emit leaves when reaching depth or point thresholds, updating perm and leaf metadata with atomics.\n- Otherwise perform two-pass quadrant partitioning (count, scan, scatter) into a device buffer in NW/NE/SW/SE order.\n- Launch child nodes for non-empty quadrants, synchronize, and free temporary buffers before returning.\n\n Here is the current student_kernel.cu:\n\n ```cuda\n // ch21-quadtree-dp-build-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <stdint.h>\n\n// ----------------------------- Data types -----------------------------\nstruct Bounds { float minx, miny, maxx, maxy; };\n__device__ __host__ inline Bounds make_bounds(float a,float b,float c,float d){ return Bounds{a,b,c,d}; }\n\nstruct QuadWork {\n const float* x;\n const float* y;\n const int* idx; // indices of points for this segment\n int begin; // segment begin (relative to idx)\n int count; // segment length\n Bounds b;\n int depth;\n int max_depth;\n int min_points;\n // outputs/globals\n int* perm; // output permutation (length n)\n int* leafOffset; // length >= n\n int* leafCount; // length >= n\n int* leafCounter; // single int in device memory\n int* permCursor; // single int in device memory\n};\n\n// Prototypes\n__global__ void quadtree_build_parent(const float* x, const float* y, int n,\n Bounds root, int max_depth, int min_points,\n int* perm, int* leafOffset, int* leafCount,\n int* leafCounter, int* permCursor,\n const int* idx_root);\n\n__global__ void quadtree_node(QuadWork w);\n\n// ----------------------------- Helpers -----------------------------\n__device__ __host__ inline bool in_NW(float px, float py, const Bounds& b){\n float mx=0.5f*(b.minx+b.maxx), my=0.5f*(b.miny+b.maxy);\n return (px < mx) && (py >= my);\n}\n__device__ __host__ inline bool in_NE(float px, float py, const Bounds& b){\n float mx=0.5f*(b.minx+b.maxx), my=0.5f*(b.miny+b.maxy);\n return (px >= mx) && (py >= my);\n}\n__device__ __host__ inline bool in_SW(float px, float py, const Bounds& b){\n float mx=0.5f*(b.minx+b.maxx), my=0.5f*(b.miny+b.maxy);\n return (px < mx) && (py < my);\n}\n__device__ __host__ inline bool in_SE(float px, float py, const Bounds& b){\n float mx=0.5f*(b.minx+b.maxx), my=0.5f*(b.miny+b.maxy);\n return (px >= mx) && (py < my);\n}\n\n__device__ __host__ inline Bounds child_bounds_NW(const Bounds& b){\n float mx=0.5f*(b.minx+b.maxx), my=0.5f*(b.miny+b.maxy);\n return make_bounds(b.minx, my, mx, b.maxy);\n}\n__device__ __host__ inline Bounds child_bounds_NE(const Bounds& b){\n float mx=0.5f*(b.minx+b.maxx), my=0.5f*(b.miny+b.maxy);\n return make_bounds(mx, my, b.maxx, b.maxy);\n}\n__device__ __host__ inline Bounds child_bounds_SW(const Bounds& b){\n float mx=0.5f*(b.minx+b.maxx), my=0.5f*(b.miny+b.maxy);\n return make_bounds(b.minx, b.miny, mx, my);\n}\n__device__ __host__ inline Bounds child_bounds_SE(const Bounds& b){\n float mx=0.5f*(b.minx+b.maxx), my=0.5f*(b.miny+b.maxy);\n return make_bounds(mx, b.miny, b.maxx, my);\n}\n\n// ----------------------------- Kernels -----------------------------\n__global__ void quadtree_build_parent(const float* x, const float* y, int n,\n Bounds root, int max_depth, int min_points,\n int* perm, int* leafOffset, int* leafCount,\n int* leafCounter, int* permCursor,\n const int* 
idx_root)\n{\n // TODO: Construct a QuadWork item for the root node and invoke child kernels\n // to recursively partition the point set.\n if (blockIdx.x == 0 && threadIdx.x == 0) {\n (void)x; (void)y; (void)n;\n (void)root; (void)max_depth; (void)min_points;\n (void)perm; (void)leafOffset; (void)leafCount;\n (void)leafCounter; (void)permCursor; (void)idx_root;\n }\n}\n\n__global__ void quadtree_node(QuadWork w)\n{\n // TODO: Recursively process the current node, partitioning points into child\n // quadrants and recording leaf metadata.\n if (threadIdx.x == 0) {\n (void)w;\n }\n}\n ```", "task_dir": "eval-tasks/ch21-quadtree-dp-build-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}
{"type": "coding", "id": "ch21-quadtree-dp-pack-coalesced", "question": "Task: ch21-quadtree-dp-pack-coalesced\n Target file: student_kernel.cu\n\n Implementation requirements:\n- Use shared memory arrays to track counts, offsets, and cursors for four quadrants.\n- Count the number of indices per quadrant within the segment.\n- Compute exclusive offsets in NW, NE, SW, SE order.\n- Scatter indices back into idx_out[segBegin:segBegin+segCount) with stable ordering within each quadrant.\n\n Here is the current student_kernel.cu:\n\n ```cuda\n // ch21-quadtree-dp-pack-coalesced / student_kernel.cu\n#include <cuda_runtime.h>\n#include <stdint.h>\n\n// TODO: Implement quadrant classification helpers\n// Each function should determine if point (px, py) belongs to the specified quadrant\n// of the bounding box [minx, miny, maxx, maxy].\n//\n// Hints:\n// - Compute midpoint: mx = (minx + maxx) / 2, my = (miny + maxy) / 2\n// - NW (Northwest): px < mx AND py >= my\n// - NE (Northeast): px >= mx AND py >= my\n// - SW (Southwest): px < mx AND py < my\n// - SE (Southeast): px >= mx AND py < my\n\nextern \"C\" __global__\nvoid pack_quadrants_singleblock(const float* __restrict__ x,\n const float* __restrict__ y,\n const int* __restrict__ idx_in,\n int* __restrict__ idx_out,\n int segBegin, int segCount,\n float minx, float miny, float maxx, float maxy)\n{\n // TODO: Implement quadrant packing with shared memory count/scan/scatter:\n //\n // Step 1: Count phase\n // - Each thread classifies its assigned points into one of 4 quadrants\n // - Use your quadrant helper functions (in_NW, in_NE, in_SW, in_SE)\n // - Store per-thread counts for each quadrant in shared memory\n //\n // Step 2: Parallel scan (prefix sum)\n // - Compute exclusive scan of counts for each quadrant to get write offsets\n // - This ensures stable ordering (preserves input order within each quadrant)\n // - Use __syncthreads() between phases\n //\n // Step 3: Scatter phase\n // - Each thread writes its assigned points to idx_out at computed offsets\n // - NW quadrant: starts at offset 0\n // - NE quadrant: starts at offset count_NW\n // - SW quadrant: starts at offset count_NW + count_NE\n // - SE quadrant: starts at offset count_NW + count_NE + count_SW\n //\n // Hints:\n // - Single block kernel: use blockDim.x threads\n // - Shared memory arrays for counts and offsets\n // - Points are indexed via idx_in[segBegin + i]\n // - Point coordinates: x[idx], y[idx]\n (void)x; (void)y; (void)idx_in; (void)idx_out;\n (void)segBegin; (void)segCount;\n (void)minx; (void)miny; (void)maxx; (void)maxy;\n}\n ```", "task_dir": "eval-tasks/ch21-quadtree-dp-pack-coalesced", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference"}