{"type": "coding", "id": "ch02-vecadd-single-turn", "task_dir": "evaluation-tasks/ch02-vecadd-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch02-vecadd-single-turn\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n#include <cuda_runtime.h>\n\n__global__ void vecAddKernel(const float* A, const float* B, float* C, int n) {\n    int i = blockIdx.x * blockDim.x + threadIdx.x;\n    if (i < n) C[i] = A[i] + B[i];\n}\n```"}
{"type": "coding", "id": "ch02-vecmul-single-turn", "task_dir": "evaluation-tasks/ch02-vecmul-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "question": "Task: ch02-vecmul-single-turn\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n#include <cuda_runtime.h>\n\n__global__ void vecMulKernel(const float* A, const float* B, float* C, int n) {\n    // TODO: Each thread i computes: C[i] = A[i] * B[i]  (if i < n)\n    // Hints:\n    //  - Derive global index i from block and thread indices\n    //  - Guard against i >= n\n}\n```"}
{"type": "coding", "id": "ch03-ex1a-matmul-row-per-thread", "task_dir": "evaluation-tasks/ch03-ex1a-matmul-row-per-thread", "student_file": "student_kernel.cu", "student_targets": ["run_student"], "reference_targets": ["run_reference"], "timeout_sec": 180, "question": "Task: ch03-ex1a-matmul-row-per-thread\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n#include <cuda_runtime.h>\n\nextern \"C\" __global__\nvoid matrixMulRowKernel(const float* __restrict__ M,\n                        const float* __restrict__ N,\n                        float* __restrict__ P,\n                        int size) {\n    // TODO:\n    // - Each thread computes ONE output row 'row'\n    // - Guard: if (row < size)\n    // - For each column 'col', compute dot(M[row, :], N[:, col])\n    // - Write P[row * size + col]\n    // Hints:\n    //   int row = blockIdx.x * blockDim.x + threadIdx.x;\n    //   for (int col = 0; col < size; ++col) { ... }\n}\n```"}
{"type": "coding", "id": "ch03-ex1b-matmul-col-per-thread", "task_dir": "evaluation-tasks/ch03-ex1b-matmul-col-per-thread", "student_file": "student_kernel.cu", "student_targets": ["run_student"], "reference_targets": ["run_reference"], "timeout_sec": 180, "question": "Task: ch03-ex1b-matmul-col-per-thread\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n#include <cuda_runtime.h>\n\nextern \"C\" __global__\nvoid matrixMulColKernel(const float* __restrict__ M,\n                        const float* __restrict__ N,\n                        float* __restrict__ P,\n                        int size) {\n    // TODO:\n    // - Each thread computes ONE output column 'col'\n    // - Guard: if (col < size)  \n    // - For each row 'row', compute dot(M[row, :], N[:, col])\n    // - Write P[row * size + col]\n    // Hints:\n    //   int col = blockIdx.x * blockDim.x + threadIdx.x;\n    //   for (int row = 0; row < size; ++row) { ... }\n}\n```"}
{"type": "coding", "id": "ch03-rgb2gray-single-turn", "task_dir": "evaluation-tasks/ch03-rgb2gray-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch03-rgb2gray-single-turn\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n#include <cuda_runtime.h>\n#include <math.h>\n\n__device__ __forceinline__ unsigned char clamp_u8(int v) {\n    return (unsigned char)(v < 0 ? 0 : (v > 255 ? 255 : v));\n}\n\n__global__ void rgb2grayKernel(const unsigned char* R,\n                               const unsigned char* G,\n                               const unsigned char* B,\n                               unsigned char* gray,\n                               int n) {\n    // TODO:\n    // - Compute global index i\n    // - If (i < n), compute:\n    //     float y = 0.299f*R[i] + 0.587f*G[i] + 0.114f*B[i];\n    //     int yi = (int)floorf(y + 0.5f);   // round to nearest\n    //     gray[i] = clamp_u8(yi);\n}\n```"}
{"type": "coding", "id": "ch04-device-props-eval", "task_dir": "evaluation-tasks/ch04-device-props-eval", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch04-device-props-eval\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n#include \"student_kernel.cuh\"\n#include <cuda_runtime.h>\n#include <string.h>\n\nint collect_device_info(DeviceInfo* out, int max_out, int* out_count) {\n    // TODO: Implement using CUDA Runtime API\n    // Required calls:\n    //   - cudaGetDeviceCount(&count)\n    //   - For each device id in [0, count): cudaGetDeviceProperties(&prop, id)\n    //\n    // Required fields to fill in for each DeviceInfo (from cudaDeviceProp prop):\n    //   name -> prop.name (ensure null-terminated)\n    //   major -> prop.major\n    //   minor -> prop.minor\n    //   totalGlobalMem -> prop.totalGlobalMem\n    //   multiProcessorCount -> prop.multiProcessorCount\n    //   totalConstMem -> prop.totalConstMem\n    //   sharedMemPerBlock -> prop.sharedMemPerBlock\n    //   regsPerBlock -> prop.regsPerBlock\n    //   warpSize -> prop.warpSize\n    //   maxThreadsPerBlock -> prop.maxThreadsPerBlock\n    //   maxThreadsDim{0,1,2} -> prop.maxThreadsDim[0..2]\n    //   maxGridSize{0,1,2} -> prop.maxGridSize[0..2]\n    //   clockRate -> prop.clockRate\n    //   memoryClockRate -> prop.memoryClockRate\n    //   memoryBusWidth -> prop.memoryBusWidth\n    //   l2CacheSize -> prop.l2CacheSize\n    //\n    // Return 0 on success, non-zero on failure.\n\n    (void)out; (void)max_out; (void)out_count; // remove after implementing\n    return 1; // placeholder: non-zero means \"not implemented\"\n}\n```"}
{"type": "coding", "id": "ch05-matmul-tiled", "task_dir": "evaluation-tasks/ch05-matmul-tiled", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch05-matmul-tiled\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n#include <cuda_runtime.h>\n#include <vector>\n#include <cstring>\n#include <cstdio>\n\n#ifndef TILE\n#define TILE 16\n#endif\n\n#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }\ninline void gpuAssert(cudaError_t code, const char* file, int line, bool abort=true){\n  if(code != cudaSuccess){\n    fprintf(stderr,\"GPUassert: %s %s %d\\n\", cudaGetErrorString(code), file, line);\n    if(abort) exit(code);\n  }\n}\n\n// Students: complete a *shared-memory tiled* GEMM kernel.\n// Requirements:\n//  - Use two __shared__ tiles [TILE][TILE] for M and N\n//  - Load with bounds checks; out-of-range elements treated as 0\n//  - __syncthreads() after loading tiles and after computing per-phase\n//  - Compute P[row, col] for all valid row< m and col< o\n//  - Do NOT modify inputs M_d or N_d (they are const on host side)\n\n__global__ void TiledMatMulKernel(const float* __restrict__ M,\n                                  const float* __restrict__ N,\n                                  float* __restrict__ P,\n                                  int m, int n, int o)\n{\n    // TODO: implement shared-memory tiling\n    // Suggested outline:\n    // 1) compute row/col from blockIdx/threadIdx\n    // 2) loop over phases ph: 0..ceil(n/TILE)-1\n    //    - load M and N tiles with bounds checks into __shared__\n    //    - __syncthreads()\n    //    - accumulate partial sum for k in [0..TILE)\n    //    - __syncthreads()\n    // 3) if(row<m && col<o) write result\n    // (Leave a stub so it compiles but fails tests until implemented)\n\n    int row = blockIdx.y * blockDim.y + threadIdx.y;\n    int col = blockIdx.x * blockDim.x + threadIdx.x;\n    float acc = 0.f;\n\n    // Incorrect placeholder (so starter compiles but fails):\n    if (row < m && col < o) {\n        acc = 0.f; // TODO replace with real tiled computation\n        P[row*o + col] = acc;\n    }\n}\n\nextern \"C\"\nvoid launch_tiled_matmul(const float* M_h, const float* N_h, float* P_h,\n                         int m, int n, int o)\n{\n    // Handle degenerate sizes\n    if(m==0 || n==0 || o==0) return;\n\n    size_t bytesM = size_t(m)*n*sizeof(float);\n    size_t bytesN = size_t(n)*o*sizeof(float);\n    size_t bytesP = size_t(m)*o*sizeof(float);\n\n    const int GUARD = 128; // guard-band elements around output buffer\n\n    float *M_d=nullptr, *N_d=nullptr, *Pguard_d=nullptr;\n    gpuErrchk(cudaMalloc(&M_d, bytesM));\n    gpuErrchk(cudaMalloc(&N_d, bytesN));\n    gpuErrchk(cudaMalloc(&Pguard_d, bytesP + 2*GUARD*sizeof(float)));\n\n    // init guards with a pattern\n    gpuErrchk(cudaMemset(Pguard_d, 0x7B, GUARD*sizeof(float)));\n    gpuErrchk(cudaMemset(Pguard_d + GUARD + m*o, 0x7B, GUARD*sizeof(float)));\n\n    gpuErrchk(cudaMemcpy(M_d, M_h, bytesM, cudaMemcpyHostToDevice));\n    gpuErrchk(cudaMemcpy(N_d, N_h, bytesN, cudaMemcpyHostToDevice));\n\n    dim3 block(TILE, TILE);\n    dim3 grid( (o + TILE - 1)/TILE, (m + TILE - 1)/TILE );\n\n    float* P_d = Pguard_d + GUARD;\n\n    TiledMatMulKernel<<<grid, block>>>(M_d, N_d, P_d, m, n, o);\n    gpuErrchk(cudaGetLastError());\n    gpuErrchk(cudaDeviceSynchronize());\n\n    // copy back entire guarded 
region to check guard bands\n    std::vector<float> P_with_guard(m*o + 2*GUARD);\n    gpuErrchk(cudaMemcpy(P_with_guard.data(), Pguard_d,\n                         P_with_guard.size()*sizeof(float),\n                         cudaMemcpyDeviceToHost));\n\n    // verify guard bands are intact (simple byte-pattern check)\n    // we won't fail here; harness uses value correctness + input immutability.\n    // (If wanted, you can add explicit guard validation logic.)\n\n    // return just the center (actual P)\n    std::memcpy(P_h, P_with_guard.data()+GUARD, bytesP);\n\n    cudaFree(M_d);\n    cudaFree(N_d);\n    cudaFree(Pguard_d);\n}\n```"}
{"type": "coding", "id": "ch05-matmul-tiled-multiturn", "task_dir": "evaluation-tasks/ch05-matmul-tiled-multiturn", "student_file": "student_kernel.cu", "student_targets": ["run-student"], "reference_targets": ["run-reference"], "timeout_sec": 180, "question": "Task: ch05-matmul-tiled-multiturn\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// student_kernel.cu\n// Implement a shared-memory tiled matrix multiply kernel:\n//   C[M x K] = A[M x N] * B[N x K]\n// TILE size is 16x16. Handle non-multiple sizes and out-of-bounds safely.\n\n#include <cuda_runtime.h>\n\nextern \"C\" void launch_student(const float* A, const float* B, float* C,\n                               int M, int N, int K, int blockSize);\n\n// TODO: Implement this kernel\n__global__ void matmul_tiled_student(const float* __restrict__ A,\n                                     const float* __restrict__ B,\n                                     float* __restrict__ C,\n                                     int M, int N, int K)\n{\n    // TODO: Implement shared-memory tiled matrix multiplication\n    // REQUIRED: TILE = 16\n    // \n    // Steps to implement:\n    // 1. Define TILE size (16)\n    // 2. Calculate 2D thread coordinates (row, col) in output matrix C\n    // 3. Declare shared memory tiles for A and B submatrices  \n    // 4. Initialize accumulator\n    // 5. Loop over tiles along the inner dimension N:\n    //    a. Cooperatively load A tile and B tile into shared memory\n    //    b. Guard against out-of-bounds accesses (pad with zeros)\n    //    c. Synchronize threads (__syncthreads())\n    //    d. Compute partial products using shared memory tiles\n    //    e. Synchronize threads again\n    // 6. Write final result to global memory (with bounds checking)\n}\n\nextern \"C\" void launch_student(const float* A, const float* B, float* C,\n                               int M, int N, int K, int /*blockSize*/)\n{\n    // TODO: Set up proper grid and block dimensions\n    // Hint: Use 16x16 thread blocks, calculate grid size based on output dimensions\n    \n    dim3 block(16, 16);\n    dim3 grid((K + 15) / 16, (M + 15) / 16);\n    matmul_tiled_student<<<grid, block>>>(A, B, C, M, N, K);\n}\n```"}
{"type": "coding", "id": "ch05-matmul-tiled-speed", "task_dir": "evaluation-tasks/ch05-matmul-tiled-speed", "student_file": "student_kernel.cu", "student_targets": ["test"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_matmul_speed", "reference_exec": "./test_reference", "question": "Task: ch05-matmul-tiled-speed\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n#include <cstdio>\n\n#ifndef TILE\n#define TILE 16\n#endif\n\n// Students implement this kernel: C[M x K] = A[M x N] * B[N x K]\n// One thread computes one C element; shared-memory tiled load of A and B.\n__global__ void matmul_tiled_student_kernel(const float* __restrict__ A,\n                                            const float* __restrict__ B,\n                                            float* __restrict__ C,\n                                            int M, int N, int K) {\n    // TODO:\n    // - Compute (row, col) from blockIdx/threadIdx\n    // - Loop over tiles of N dimension\n    // - Use shared memory tiles for A (TILE x TILE) and B (TILE x TILE)\n    // - Guard for out-of-bounds loads/stores\n    // - Accumulate sum into a register and store to C[row*K + col]\n\n    // Hints (remove after implementing):\n    // extern __shared__ float smem[]; // or static shared tiles\n    // __shared__ float As[TILE][TILE], Bs[TILE][TILE];\n    // int row = blockIdx.y * blockDim.y + threadIdx.y;\n    // int col = blockIdx.x * blockDim.x + threadIdx.x;\n\n    // --- your code here ---\n\n    // Placeholder stub (compiles but gives wrong results):\n    int row = blockIdx.y * blockDim.y + threadIdx.y;\n    int col = blockIdx.x * blockDim.x + threadIdx.x;\n\n    if (row < M && col < K) {\n        C[row * K + col] = 0.0f; // TODO: replace with proper tiled computation\n    }\n}\n\n// Host wrapper called by test harness\nextern \"C\" void matmul_student(const float* dA, const float* dB, float* dC,\n                               int M, int N, int K, int tile) {\n    dim3 block(TILE, TILE);\n    dim3 grid((K + TILE - 1)/TILE, (M + TILE - 1)/TILE);\n    // You may ignore `tile` and use TILE macro; the harness passes TILE=16.\n    matmul_tiled_student_kernel<<<grid, block>>>(dA, dB, dC, M, N, K);\n}\n```"}
{"type": "coding", "id": "ch06-thread-coarsening-matmul", "task_dir": "evaluation-tasks/ch06-thread-coarsening-matmul", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch06-thread-coarsening-matmul\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n#include <cuda_runtime.h>\n#include <cstdio>\n\n#ifndef TILE_WIDTH\n#define TILE_WIDTH 16\n#endif\n\n#ifndef COARSE_FACTOR\n#define COARSE_FACTOR 4\n#endif\n\n// Students: implement a coarsened, tiled GEMM C[M×K] = A[M×N] * B[N×K]\n// Each block computes a tile: height TILE_WIDTH, width TILE_WIDTH*COARSE_FACTOR\n// Use shared memory tiles for A and B; do safe (bounds-checked) loads.\n// Row-major layout: elem(i,j) at base[i*ld + j].\n\n__global__ void MatmulCoarsenedKernel(const float* __restrict__ A,\n                                      const float* __restrict__ B,\n                                      float* __restrict__ C,\n                                      int M, int N, int K)\n{\n    // TODO: Implement thread coarsening matrix multiplication\n    //\n    // Key requirements:\n    // 1. Use shared memory tiles for A and B:\n    //    __shared__ float As[TILE_WIDTH][TILE_WIDTH];\n    //    __shared__ float Bs[TILE_WIDTH][TILE_WIDTH * COARSE_FACTOR];\n    //\n    // 2. Each thread computes COARSE_FACTOR output elements\n    //    - Thread (tx,ty) computes elements at columns: colBase + c*TILE_WIDTH for c=0..COARSE_FACTOR-1\n    //    - Use register array: float acc[COARSE_FACTOR];\n    //\n    // 3. Loop over tiles of the N dimension:\n    //    - Load A tile (TILE_WIDTH x TILE_WIDTH)\n    //    - Load B super-tile (TILE_WIDTH x TILE_WIDTH*COARSE_FACTOR) in COARSE_FACTOR stripes\n    //    - __syncthreads() after loading\n    //    - Compute partial products with triple nested loop (k, c)\n    //    - __syncthreads() before next iteration\n    //\n    // 4. 
Write results with bounds checking\n    //\n    // Template structure:\n    // const int ty = threadIdx.y;\n    // const int tx = threadIdx.x;\n    // const int row = blockIdx.y * TILE_WIDTH + ty;\n    // const int colBase = blockIdx.x * (TILE_WIDTH * COARSE_FACTOR) + tx;\n    //\n    // float acc[COARSE_FACTOR];\n    // for (int c = 0; c < COARSE_FACTOR; ++c) acc[c] = 0.0f;\n    //\n    // Loop over tiles...\n    //\n    // Write results...\n\n    // Placeholder implementation (incorrect, will fail tests):\n    int row = blockIdx.y * blockDim.y + threadIdx.y;\n    int col = blockIdx.x * blockDim.x + threadIdx.x;\n\n    if (row < M && col < K) {\n        C[row * K + col] = 0.0f; // TODO: Replace with actual coarsened computation\n    }\n}\n\n// Student launcher: choose grid/block and launch your kernel\nextern \"C\" void launch_student(const float* A_d,\n                               const float* B_d,\n                               float* C_d,\n                               int M, int N, int K)\n{\n    // TODO: Configure proper grid and block dimensions for thread coarsening\n    //\n    // Key points:\n    // - Block size should be (TILE_WIDTH, TILE_WIDTH)\n    // - Grid X dimension should account for COARSE_FACTOR: (K + TILE_WIDTH*COARSE_FACTOR - 1) / (TILE_WIDTH*COARSE_FACTOR)\n    // - Grid Y dimension covers rows: (M + TILE_WIDTH - 1) / TILE_WIDTH\n\n    // Placeholder launch (incorrect dimensions):\n    dim3 block(TILE_WIDTH, TILE_WIDTH);\n    dim3 grid((K + TILE_WIDTH - 1) / TILE_WIDTH, (M + TILE_WIDTH - 1) / TILE_WIDTH);\n\n    MatmulCoarsenedKernel<<<grid, block>>>(A_d, B_d, C_d, M, N, K);\n}\n```"}
{"type": "coding", "id": "ch07-conv2d-tiled-constant", "task_dir": "evaluation-tasks/ch07-conv2d-tiled-constant", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch07-conv2d-tiled-constant\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// students edit only this file\n#include <cuda_runtime.h>\n#include <cstdio>\n\n#ifndef TILE\n#define TILE 16\n#endif\n\n#ifndef MAX_RADIUS\n#define MAX_RADIUS 8   // supports up to (2*8+1)=17x17 filters\n#endif\n\n// 1D constant buffer for filter coefficients (row-major)\n// Size = (2*MAX_RADIUS+1)^2\n__constant__ float c_filter[(2*MAX_RADIUS+1)*(2*MAX_RADIUS+1)];\n\nextern \"C\" __host__ void setFilterConstant(const float* h_filter, int r) {\n    const int K = 2*r + 1;\n    cudaMemcpyToSymbol(c_filter, h_filter, K*K*sizeof(float), 0, cudaMemcpyHostToDevice);\n}\n\n// Students must implement this kernel.\n// Requirements:\n// - Shared-memory tiling with halo of +/-r (use zero padding for out-of-bounds loads)\n// - Use c_filter (in constant memory) for filter coefficients\n// - Each thread computes one output pixel (if in bounds)\n// - Grid/block: 2D, blockDim=(TILE,TILE), gridDim=ceil(W/TILE) x ceil(H/TILE)\n// - Inputs/outputs are float* (grayscale), shapes: in/out = H*W\n// - r is runtime radius (<= MAX_RADIUS)\n__global__ void conv2d_tiled_constant_kernel(const float* __restrict__ in,\n                                             float* __restrict__ out,\n                                             int H, int W, int r)\n{\n    // TODO: Implement tiled 2D convolution with constant memory\n    //\n    // Key steps:\n    // 1) Compute global (x,y) coordinates for this thread\n    // 2) Declare shared memory tile with halo: extern __shared__ float smem[];\n    //    Size needed: (TILE+2*r) * (TILE+2*r) * sizeof(float)\n    // 3) Compute the tile's coverage region including halo\n    // 4) Cooperatively load the entire tile+halo region into shared memory\n    //    - Use zero padding for out-of-bounds pixels\n    //    - May need nested loops for threads to cover entire shared memory region\n    // 5) __syncthreads() to ensure all data is loaded\n    // 6) If this thread's output pixel is in bounds, compute convolution:\n    //    - Access input pixels from shared memory (with proper offsets)\n    //    - Access filter coefficients from c_filter constant memory\n    //    - Accumulate weighted sum\n    // 7) Write result to global output memory\n    //\n    // Hints:\n    // - Filter coefficients in c_filter are stored row-major: c_filter[(dy+r)*(2*r+1) + (dx+r)]\n    // - Shared memory indexing: smem[sy * tileWidth + sx] where tileWidth = TILE+2*r\n    // - Global input indexing: in[gy * W + gx]\n    // - Consider boundary conditions carefully for both image edges and tile edges\n\n    // Placeholder implementation (will fail until properly implemented):\n    int x = blockIdx.x * blockDim.x + threadIdx.x;\n    int y = blockIdx.y * blockDim.y + threadIdx.y;\n\n    if (x < W && y < H) {\n        // This is incorrect - just copies input to output\n        out[y * W + x] = in[y * W + x];\n    }\n}\n```"}
{"type": "coding", "id": "ch08-heat-3d-single-turn", "task_dir": "evaluation-tasks/ch08-heat-3d-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch08-heat-3d-single-turn\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n#include <cstdio>\n\n// Implement a single explicit 7-point heat step.\n// in  : N*N*N input (flattened, row-major: i*N*N + j*N + k)\n// out : N*N*N output (same layout)\n// N   : grid dimension\n// alpha, dt, dx: physical parameters; r = alpha*dt/(dx*dx)\n// Boundary policy: copy boundary through (out = in) if any neighbor would be OOB.\n__global__ void heat_step_kernel(const float* __restrict__ in,\n                                 float* __restrict__ out,\n                                 unsigned int N,\n                                 float alpha, float dt, float dx)\n{\n    // TODO:\n    // 1) Compute (i,j,k) from block and thread indices\n    // 2) If any of i,j,k is 0 or N-1 => boundary: out = in and return\n    // 3) Else compute r = alpha*dt/(dx*dx) and 7-point update:\n    //    out = in + r*(sum six neighbors - 6*in)\n    // Guard small N (N<3) by simply copying (no interior exists).\n}\n```"}
{"type": "coding", "id": "ch08-stencil3d-basic-single-turn", "task_dir": "evaluation-tasks/ch08-stencil3d-basic-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch08-stencil3d-basic-single-turn\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n#include <cstdio>\n\n__global__ void stencil3d_basic_student(\n    const float* __restrict__ in,\n    float* __restrict__ out,\n    int N,\n    float c0, float c1, float c2, float c3, float c4, float c5, float c6)\n{\n    // TODO:\n    // - Each thread computes OUT(i,j,k) for one grid point\n    // - Use 7-point stencil on INTERIOR points: (1..N-2) in each dim\n    // - For boundary points (i==0 || i==N-1 || ...), copy through: out = in\n    // - Guard for N==0 or N==1 safely\n    // Hints:\n    //   int i = blockIdx.z * blockDim.z + threadIdx.z;\n    //   int j = blockIdx.y * blockDim.y + threadIdx.y;\n    //   int k = blockIdx.x * blockDim.x + threadIdx.x;\n    //   int idx = (i * N + j) * N + k;\n    //   int L = ((i) * N + j) * N + (k-1); // k-1, etc.\n}\n```"}
{"type": "coding", "id": "ch08-stencil3d-sharedmem-single-turn", "task_dir": "evaluation-tasks/ch08-stencil3d-sharedmem-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch08-stencil3d-sharedmem-single-turn\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n#include <cstdio>\n\n// Tile parameters for this task\n#ifndef IN_TILE_DIM\n#define IN_TILE_DIM 8          // threads per dim that load (with halo)\n#endif\n#define OUT_TILE_DIM (IN_TILE_DIM-2)\n\n__global__ void stencil3d_shared_student(\n    const float* __restrict__ in,\n    float* __restrict__ out,\n    int N,\n    float c0, float c1, float c2, float c3, float c4, float c5, float c6)\n{\n    // TODO:\n    // - Launch with block=(IN_TILE_DIM,IN_TILE_DIM,IN_TILE_DIM)\n    // - Each block loads a IN_TILE_DIM^3 tile (with halo) into shared memory\n    // - Only threads with local coords in [1..IN_TILE_DIM-2] compute outputs\n    //   for the corresponding global interior coordinates\n    // - Copy-through boundary cells (same rule as basic)\n    // Hints:\n    //   Shared array: __shared__ float tile[IN_TILE_DIM][IN_TILE_DIM][IN_TILE_DIM];\n    //   Global coords start at (blockIdx * OUT_TILE_DIM) - 1 (to include halo)\n    //   Guard global loads (row/col/depth) that fall outside [0..N-1]\n}\n```"}
{"type": "coding", "id": "ch09-histogram-naive-single-turn", "task_dir": "evaluation-tasks/ch09-histogram-naive-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch09-histogram-naive-single-turn\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n#include <cstddef>\n\n// TODO: Implement naive global-atomic histogram.\n//\n// Requirements:\n// - Use global-memory atomicAdd(&hist[bin], 1u)\n// - Grid-stride loop over N\n// - Ignore out-of-range bin indices\n// - Do not write to 'in'\n// - No shared memory\n//\n// Signature must not change.\n__global__ void histogram_kernel(const int* in, unsigned int* hist,\n                                 size_t N, int num_bins)\n{\n    // TODO:\n    // size_t i = ...\n    // size_t stride = ...\n    // for (; i < N; i += stride) {\n    //   int bin = in[i];\n    //   if (0 <= bin && bin < num_bins) atomicAdd(&hist[bin], 1u);\n    // }\n}\n```"}
{"type": "coding", "id": "ch09-histogram-shared-single-turn", "task_dir": "evaluation-tasks/ch09-histogram-shared-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch09-histogram-shared-single-turn\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n#include <cstddef>\n\n// TODO: Implement shared-memory privatized histogram.\n//\n// Requirements:\n// - Use extern __shared__ unsigned int s_hist[] of size num_bins\n// - Cooperatively zero shared histogram\n// - Grid-stride loop with shared memory accumulation\n// - Block sync, then cooperatively merge to global with atomics\n// - Handle num_bins > blockDim.x\n// - Do not write to 'in'\n//\n// Algorithm:\n// 1. extern __shared__ unsigned int s_hist[];\n// 2. Cooperatively zero s_hist[0..num_bins-1]\n// 3. __syncthreads()\n// 4. Grid-stride loop: s_hist[in[i]]++ (no atomics within block)\n// 5. __syncthreads()\n// 6. Cooperatively: for each bin, atomicAdd(&hist[bin], s_hist[bin])\n//\n// Signature must not change.\n__global__ void histogram_kernel(const int* in, unsigned int* hist,\n                                 size_t N, int num_bins)\n{\n    // TODO:\n    // extern __shared__ unsigned int s_hist[];\n    //\n    // // 1. Cooperatively zero shared histogram\n    // for (int bin = threadIdx.x; bin < num_bins; bin += blockDim.x) {\n    //     s_hist[bin] = 0u;\n    // }\n    // __syncthreads();\n    //\n    // // 2. Grid-stride loop with shared accumulation\n    // size_t i = blockIdx.x * size_t(blockDim.x) + threadIdx.x;\n    // size_t stride = size_t(blockDim.x) * gridDim.x;\n    // for (; i < N; i += stride) {\n    //     int bin = in[i];\n    //     if (bin >= 0 && bin < num_bins) {\n    //         atomicAdd(&s_hist[bin], 1u);\n    //     }\n    // }\n    // __syncthreads();\n    //\n    // // 3. Cooperatively merge to global memory\n    // for (int bin = threadIdx.x; bin < num_bins; bin += blockDim.x) {\n    //     if (s_hist[bin] > 0) {\n    //         atomicAdd(&hist[bin], s_hist[bin]);\n    //     }\n    // }\n}\n```"}
{"type": "coding", "id": "ch10-reduction-max-arbitrary", "task_dir": "evaluation-tasks/ch10-reduction-max-arbitrary", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch10-reduction-max-arbitrary\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n#include <limits>\n\n// TODO: Implement arbitrary-length maximum reduction with grid-stride, shared\n// memory, and a CAS-loop atomicMax for float. Initialize per-thread local max\n// to -INFINITY when n==0 or no elements in its stride.\n__device__ inline\nvoid atomicMaxFloat(float* addr, float val) {\n    // TODO: Implement via atomicCAS on int bit patterns\n}\n\nextern \"C\" __global__\nvoid reduce_max_arbitrary(const float* in, float* out, int n) {\n    // TODO\n}\n```"}
{"type": "coding", "id": "ch10-reduction-sum-2048", "task_dir": "evaluation-tasks/ch10-reduction-sum-2048", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch10-reduction-sum-2048\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n\n// TODO: Implement convergent shared-memory reduction for exactly 2048 elements.\n// Contract:\n//  - gridDim.x == 1, blockDim.x == 1024\n//  - Each thread loads two elements: in[tid] and in[tid + 1024]\n//  - Reduce in shared memory (convergent pattern), write out[0] only.\nextern \"C\" __global__\nvoid reduce_sum_2048(const float* in, float* out) {\n    // TODO\n    // Suggested shape:\n    // __shared__ float s[1024];\n    // unsigned t = threadIdx.x;\n    // float v = in[t] + in[t + 1024];\n    // s[t] = v;\n    // __syncthreads();\n    // for (unsigned stride = blockDim.x/2; stride >= 1; stride >>= 1) { ... }\n    // if (t == 0) out[0] = s[0];\n}\n```"}
{"type": "coding", "id": "ch10-reduction-sum-arbitrary", "task_dir": "evaluation-tasks/ch10-reduction-sum-arbitrary", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch10-reduction-sum-arbitrary\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n\n// TODO: Implement arbitrary-length sum with grid-stride loads, shared-memory\n// reduction per block, and atomicAdd(out, block_sum). Use dynamic shared memory.\nextern \"C\" __global__\nvoid reduce_sum_arbitrary(const float* in, float* out, int n) {\n    // TODO\n    // Suggested shape:\n    // extern __shared__ float s[];\n    // int tid = threadIdx.x;\n    // long long idx = blockIdx.x * (long long)blockDim.x * 2 + tid;\n    // long long stride = (long long)gridDim.x * blockDim.x * 2;\n    // float sum = 0.f;\n    // for (; idx < n; idx += stride) {\n    //   sum += in[idx];\n    //   long long idx2 = idx + blockDim.x;\n    //   if (idx2 < n) sum += in[idx2];\n    // }\n    // s[tid] = sum; __syncthreads();\n    // for (int step = blockDim.x/2; step >= 1; step >>= 1) { ... }\n    // if (tid==0) atomicAdd(out, s[0]);\n}\n```"}
{"type": "coding", "id": "ch12-merge-path-single-turn", "task_dir": "evaluation-tasks/ch12-merge-path-single-turn", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch12-merge-path-single-turn\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n\n// TODO: Implement parallel merge using merge-path (diagonal partition).\n// Contract summary:\n//  - Stable: on ties, choose A first\n//  - Partition per thread using diagonals; then sequentially merge that slice\n//  - Inputs A,B are sorted ascending; write C of length nA+nB\n\n__device__ inline int clampi(int x, int lo, int hi) {\n    return x < lo ? lo : (x > hi ? hi : x);\n}\n\n// Find (i,j) on diagonal d (i+j = d) satisfying merge-path conditions.\n// Returns i; j = d - i.\n// Invariants:\n//   lo = max(0, d - nB), hi = min(d, nA)\n// Stable tie-breaking: A[i-1] <= B[j]  (and B[j-1] < A[i])\n__device__ __forceinline__\nint merge_path_search(const int* __restrict__ A, int nA,\n                      const int* __restrict__ B, int nB,\n                      int d)\n{\n    // TODO: Implement binary search to find merge-path coordinates\n    // Return i such that (i, d-i) satisfies merge-path conditions\n    return 0; // placeholder\n}\n\nextern \"C\" __global__\nvoid merge_path_kernel(const int* __restrict__ A, int nA,\n                       const int* __restrict__ B, int nB,\n                       int* __restrict__ C)\n{\n    // TODO: Implement merge-path parallel merge\n    // 1. Calculate thread's diagonal range [d0, d1)\n    // 2. Find merge coordinates (i0,j0) and (i1,j1) using merge_path_search\n    // 3. Sequentially merge A[i0..i1) and B[j0..j1) into C[d0..d1)\n}\n```"}
{"type": "coding", "id": "ch13-merge-path-fullsort-single", "task_dir": "evaluation-tasks/ch13-merge-path-fullsort-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch13-merge-path-fullsort-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch13-merge-path-fullsort-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <stdint.h>\n\n// CONTRACT:\n// Implement stable GPU merge sort using iterative merge passes.\n// You must implement:\n//   - merge_path_search (device): diagonal search\n//   - merge_path_kernel (global): merges a slice [d0,d1)\n//   - gpu_merge_sort (host): doubles width and ping-pongs buffers until sorted\n\n__device__ int merge_path_search(const uint32_t* A, int nA,\n                                 const uint32_t* B, int nB,\n                                 int d)\n{\n    // TODO: diagonal binary search; return i (then j=d-i)\n    return 0;\n}\n\n__global__ void merge_path_kernel(const uint32_t* __restrict__ A, int nA,\n                                  const uint32_t* __restrict__ B, int nB,\n                                  uint32_t* __restrict__ C)\n{\n    // TODO:\n    //  - P = total threads\n    //  - segment size seg = ceil((nA+nB)/P)\n    //  - each thread t merges its slice [d0,d1)\n    //  - compute (i0,j0) & (i1,j1) via merge_path_search\n    //  - sequentially merge into C[d0..d1)\n}\n\nextern \"C\" void gpu_merge_sort(const uint32_t* d_in, uint32_t* d_out, int n)\n{\n    // TODO:\n    //  - width = 1; ping-pong buffers\n    //  - for width < n:\n    //      * launch merges of adjacent runs [k..k+width) and [k+width..k+2*width)\n    //  - final result copied to d_out\n    (void)d_in; (void)d_out; (void)n;\n}\n```"}
{"type": "coding", "id": "ch13-radix-multiradix-coarsened-fullsort-single", "task_dir": "evaluation-tasks/ch13-radix-multiradix-coarsened-fullsort-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch13-radix-multiradix-coarsened-fullsort-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n#include <stdint.h>\n#include <cstdio>\n\nstatic inline void CK(cudaError_t e,const char* m){\n    if(e!=cudaSuccess){ std::fprintf(stderr,\"CUDA %s: %s\\n\", m, cudaGetErrorString(e)); std::exit(2); }\n}\n\n#ifndef RADIX_BITS\n#define RADIX_BITS 4\n#endif\n#ifndef COARSENING_FACTOR\n#define COARSENING_FACTOR 8\n#endif\n#ifndef BLOCK\n#define BLOCK 256\n#endif\n\n// TODO: Implement a stable 4-bit LSD radix sort with thread coarsening (COARSENING_FACTOR).\n// Sort must be in-place on `data` (you may use a temp buffer internally).\n\n// Choose your radix size and coarsening factor\n#define RADIX_SIZE (1 << RADIX_BITS)  // 2^RADIX_BITS buckets\n#define RADIX_MASK (RADIX_SIZE - 1)\n\nextern \"C\" __global__\nvoid radix_sort_coarsened_kernel(unsigned int* __restrict__ data,\n                                unsigned int* __restrict__ temp,\n                                int n,\n                                int shift)\n{\n    // TODO: Implement coarsened multi-radix sort pass\n    // 1. Each thread loads COARSENING_FACTOR elements\n    // 2. Count elements for each bucket using coarsened loading\n    // 3. Compute prefix sums to find output positions for each bucket\n    // 4. Scatter elements to correct positions based on radix value\n    // 5. Ensure stable sorting and efficient memory access patterns\n}\n\nextern \"C\"\nvoid radix_sort_coarsened_host(unsigned int* data, int n)\n{\n    if(n <= 1) return;\n    // Placeholder: shallow copy (intentionally insufficient so tests fail until implemented)\n    unsigned int* tmp=nullptr;\n    CK(cudaMalloc(&tmp, n*sizeof(unsigned int)), \"malloc tmp\");\n    CK(cudaMemcpy(tmp, data, n*sizeof(unsigned int), cudaMemcpyDeviceToDevice), \"copy to tmp\");\n    CK(cudaMemcpy(data, tmp, n*sizeof(unsigned int), cudaMemcpyDeviceToDevice), \"copy back\");\n    cudaFree(tmp);\n}\n```"}
{"type": "coding", "id": "ch13-radix-multiradix-fullsort-single", "task_dir": "evaluation-tasks/ch13-radix-multiradix-fullsort-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch13-radix-multiradix-fullsort-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// student_kernel.cu\n#include <cuda_runtime.h>\n#include <stdint.h>\n#include <cstdio>\n\nstatic inline void CK(cudaError_t e, const char* m){\n    if(e != cudaSuccess){\n        std::fprintf(stderr, \"CUDA %s: %s\\n\", m, cudaGetErrorString(e));\n        std::exit(2);\n    }\n}\n\n// TODO: Implement a multiradix (RADIX_BITS=4) stable radix sort in-place.\n// Contract:\n//   extern \"C\" void radix_sort_multiradix_host(unsigned int* data, int n);\n// Requirements:\n//   - Sort ascending, stable per pass (4-bit buckets, 8 passes total).\n//   - Arbitrary n (including 0 / non-multiples of block size).\n//   - No OOB writes (tests use guarded buffers).\n//   - In-place on `data` (you may use an internal device temp buffer).\n\n// Choose your radix size (recommended: 2-bit or 4-bit)\n#define RADIX_BITS 2\n#define RADIX_SIZE (1 << RADIX_BITS)  // 2^RADIX_BITS buckets\n#define RADIX_MASK (RADIX_SIZE - 1)\n\nextern \"C\" __global__\nvoid radix_sort_multiradix_kernel(unsigned int* __restrict__ data,\n                                 unsigned int* __restrict__ temp,\n                                 int n,\n                                 int shift)\n{\n    // TODO: Implement multi-radix sort pass\n    // 1. Count elements for each bucket (RADIX_SIZE buckets) using shared memory\n    // 2. Compute prefix sums to find output positions for each bucket\n    // 3. Scatter elements to correct positions based on radix value\n    // 4. Ensure stable sorting (preserve relative order for equal keys)\n}\n\nextern \"C\"\nvoid radix_sort_multiradix_host(unsigned int* data, int n)\n{\n    if (n <= 1) return;\n    // Starter behavior: shallow copy → will fail non-trivial tests.\n    unsigned int* tmp = nullptr;\n    CK(cudaMalloc(&tmp, n*sizeof(unsigned int)), \"malloc tmp\");\n    CK(cudaMemcpy(tmp, data, n*sizeof(unsigned int), cudaMemcpyDeviceToDevice), \"copy tmp\");\n    CK(cudaMemcpy(data, tmp, n*sizeof(unsigned int), cudaMemcpyDeviceToDevice), \"copy back\");\n    cudaFree(tmp);\n}\n```"}
{"type": "coding", "id": "ch13-radix-naive-1bit-fullsort-single", "task_dir": "evaluation-tasks/ch13-radix-naive-1bit-fullsort-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch13-radix-naive-1bit-fullsort-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// student_kernel.cu\n// TODO: Implement a naive 1-bit parallel radix sort that fully sorts 32-bit keys\n// across 32 passes (LSB -> MSB), stable per pass.\n//\n// Requirements:\n//  - API: extern \"C\" void radix_sort_1bit_host(unsigned int* data, int n)\n//  - In-place: modify data in-place\n//  - Stability: within each bit-partition, preserve relative order (use zerosBefore(i) / onesBefore(i))\n//  - Multi-pass orchestration: 32 passes, swap ping-pong buffers every pass\n//  - Correct for arbitrary n (n can be 0)\n//  - No OOB writes: only write within [0..n)\n//\n// Hints (not mandatory, but aligned with the reference):\n//  - kFlagZeros: flagsZero[i] = 1 if ((x >> bit)&1)==0 else 0\n//  - kBlockExclusiveScan + host scan over block sums for robustness\n//  - kAddBlockOffsets to turn per-block exclusive scans into global exclusive scan\n//  - kScatter uses stable positions:\n//        if bit==0: pos = zerosBefore(i)\n//        else      : pos = totalZeros + (i - zerosBefore(i))\n\n#include <cuda_runtime.h>\n\n// TODO: Implement naive 1-bit radix sort using LSD approach.\n// Contract summary:\n//  - Stable: equal elements maintain relative order\n//  - Process 1 bit per pass, 32 passes total for uint32_t\n//  - Use parallel counting, prefix sum, and scattering\n//  - Sort in ascending order\n\nextern \"C\" __global__\nvoid radix_sort_1bit_kernel(unsigned int* __restrict__ data,\n                           unsigned int* __restrict__ temp,\n                           int n,\n                           int bit)\n{\n    // TODO: Implement single-bit radix sort pass\n    // 1. Count elements with bit=0 and bit=1 using shared memory\n    // 2. Compute prefix sums to find output positions\n    // 3. Scatter elements to correct positions based on bit value\n    // 4. Ensure stable sorting (preserve relative order for equal keys)\n}\n\nextern \"C\"\nvoid radix_sort_1bit_host(unsigned int* data, int n)\n{\n    // TODO: Implement host function that orchestrates 32 sorting passes\n    // 1. Allocate temporary buffer\n    // 2. For each bit position (0 to 31):\n    //    - Launch radix_sort_1bit_kernel\n    //    - Swap data and temp pointers\n    // 3. Ensure final result is in original data array\n    // 4. Clean up temporary buffer\n}\n```"}
{"type": "coding", "id": "ch13-radix-onepass-multiradix-single", "task_dir": "evaluation-tasks/ch13-radix-onepass-multiradix-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch13-radix-onepass-multiradix-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch13-radix-onepass-multiradix-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <stdint.h>\n\n// CONTRACT:\n// Implement one *stable* multiradix pass over keys.\n// - keys_d:  input keys (length n)\n// - out_d:   output keys (length n)\n// - n:       number of elements\n// - r:       bits per pass (1, 2, or 4)\n// - shift:   bit shift for the digit (e.g., 0, r, 2r, ...)\n// Approach expected (typical):\n//   1) extract digits (0..(2^r - 1))\n//   2) per-block histogram -> global array [grid x buckets]\n//   3) host exclusive scan to get global bases & per-block bucket bases\n//   4) stable scatter into out_d using digit, globalBase[b], blockBase[block,b], and local offset within block\n// NOTE: Stability means equal digits preserve the original order.\n\nextern \"C\" void radix_onepass_multiradix(\n    const uint32_t* keys_d, uint32_t* out_d,\n    int n, int r, int shift);\n\n// TODO: provide your implementation\nextern \"C\" void radix_onepass_multiradix(\n    const uint32_t* keys_d, uint32_t* out_d,\n    int n, int r, int shift)\n{\n    // Implement kernels + host prefix here.\n    // You may choose blockDim=256 and compute grid from n.\n    // (Any correct stable implementation passes.)\n    (void)keys_d; (void)out_d; (void)n; (void)r; (void)shift;\n}\n```"}
{"type": "coding", "id": "ch14-spmv-csr-thread-per-row-single", "task_dir": "evaluation-tasks/ch14-spmv-csr-thread-per-row-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch14-spmv-csr-thread-per-row-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch14-spmv-csr-thread-per-row-single / student_kernel.cu\n#include <cuda_runtime.h>\n\n// CONTRACT:\n// Compute y = A * x where A is in CSR format (thread-per-row).\n// Inputs (device):\n//   rowPtr[m+1], colIdx[nnz], vals[nnz] (float), x[n]\n// Output (device):\n//   y[m] (float)  -- kernel must overwrite y[i] with the row sum\n//\n// Requirements:\n//  - Each thread processes one row (use grid-stride over rows).\n//  - Bounds checks on row index.\n//  - No atomics required (each row written by a single thread).\n//\n// Signature used by tests:\nextern \"C\" __global__\nvoid spmv_csr_kernel(const int* __restrict__ rowPtr,\n                     const int* __restrict__ colIdx,\n                     const float* __restrict__ vals,\n                     const float* __restrict__ x,\n                     float* __restrict__ y,\n                     int m)\n{\n    // TODO: implement CSR thread-per-row\n    // Suggested pattern:\n    // for (int row = blockIdx.x*blockDim.x + threadIdx.x;\n    //      row < m;\n    //      row += blockDim.x*gridDim.x)\n    // {\n    //     int start = rowPtr[row];\n    //     int end   = rowPtr[row+1];\n    //     float sum = 0.f;\n    //     for (int j=start;j<end;++j){\n    //         sum += vals[j] * x[colIdx[j]];\n    //     }\n    //     y[row] = sum;\n    // }\n}\n```"}
{"type": "coding", "id": "ch14-spmv-coo-single", "task_dir": "evaluation-tasks/ch14-spmv-coo-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch14-spmv-coo-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch14-spmv-coo-single / student_kernel.cu\n#include <cuda_runtime.h>\n\n// CONTRACT:\n// Compute y = A * x where A is in COO format.\n// Inputs (device):\n//   row_idx[nnz], col_idx[nnz], vals[nnz]   (float)\n//   x[n]  (float)\n// Output (device):\n//   y[m]  (float)  -- test harness zero-initializes y before launch\n//\n// Requirements:\n//  - Grid-stride loop over nnz\n//  - Bounds check\n//  - Use atomicAdd(&y[row], vals[k] * x[col]) to handle duplicates\n//  - No writes to inputs, only y is modified\n//\n// Signature used by tests:\nextern \"C\" __global__\nvoid spmv_coo_kernel(const int* __restrict__ row_idx,\n                     const int* __restrict__ col_idx,\n                     const float* __restrict__ vals,\n                     const float* __restrict__ x,\n                     float* __restrict__ y,\n                     int nnz)\n{\n    // TODO: implement COO SpMV\n    // Suggested pattern:\n    // for (int k = blockIdx.x*blockDim.x + threadIdx.x; k < nnz; k += blockDim.x*gridDim.x) {\n    //     int r = row_idx[k];\n    //     int c = col_idx[k];\n    //     float a = vals[k];\n    //     atomicAdd(&y[r], a * x[c]);\n    // }\n}\n```"}
{"type": "coding", "id": "ch14-spmv-ell-single", "task_dir": "evaluation-tasks/ch14-spmv-ell-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch14-spmv-ell-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch14-spmv-ell-single / student_kernel.cu\n#include <cuda_runtime.h>\n\n// CONTRACT (ELL):\n//  - y = A * x, A in ELL format with row-major storage of width K.\n//  - Arrays sized: colIdx[m*K], vals[m*K]. Slot (i,t) is at i*K + t.\n//  - Padding slots have colIdx < 0 and MUST be ignored.\n//  - One thread computes one row (grid-stride over rows). No atomics needed.\n//  - Overwrite y[i] with the row sum.\n//\n// Signature used by tests:\nextern \"C\" __global__\nvoid spmv_ell_kernel(const int* __restrict__ colIdx,\n                     const float* __restrict__ vals,\n                     const float* __restrict__ x,\n                     float* __restrict__ y,\n                     int m, int K)\n{\n    // TODO: implement ELL SpMV (thread-per-row, grid-stride)\n    // for (int row = blockIdx.x*blockDim.x + threadIdx.x;\n    //      row < m;\n    //      row += blockDim.x*gridDim.x) {\n    //   float sum = 0.f;\n    //   int base = row * K;\n    //   for (int t=0; t<K; ++t) {\n    //       int c = colIdx[base + t];\n    //       if (c >= 0) sum += vals[base + t] * x[c];\n    //   }\n    //   y[row] = sum;\n    // }\n}\n```"}
{"type": "coding", "id": "ch14-spmv-jds-single", "task_dir": "evaluation-tasks/ch14-spmv-jds-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch14-spmv-jds-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch14-spmv-jds-single / student_kernel.cu\n#include <cuda_runtime.h>\n\nextern \"C\" __global__\nvoid spmv_jds_kernel(const int* __restrict__ colJds,\n                     const float* __restrict__ valJds,\n                     const int* __restrict__ permute,\n                     const int* __restrict__ jdPtr,\n                     const float* __restrict__ x,\n                     float* __restrict__ y,\n                     int m, int maxJ)\n{\n    // TODO: Implement JDS SpMV\n    // 1) Each thread handles one row: tid maps to permuted row permute[tid]\n    // 2) For each jagged diagonal d=0..maxJ-1:\n    //    - Check if this row has an entry in diagonal d: if tid < jdPtr[d+1]-jdPtr[d]\n    //    - If yes, find the JDS index: jds_idx = jdPtr[d] + tid\n    //    - Accumulate: sum += valJds[jds_idx] * x[colJds[jds_idx]]\n    // 3) Write result to original position: y[permute[tid]] = sum\n    //\n    // Use grid-stride loop over permuted rows:\n    // for(int tid = blockIdx.x * blockDim.x + threadIdx.x;\n    //     tid < m;\n    //     tid += blockDim.x * gridDim.x) {\n    //   int orig_row = permute[tid];\n    //   float sum = 0.f;\n    //   for(int d = 0; d < maxJ; d++) {\n    //     int diag_size = jdPtr[d+1] - jdPtr[d];\n    //     if(tid < diag_size) {\n    //       int jds_idx = jdPtr[d] + tid;\n    //       sum += valJds[jds_idx] * x[colJds[jds_idx]];\n    //     }\n    //   }\n    //   y[orig_row] = sum;\n    // }\n}\n\nextern \"C\" void spmv_jds(const int* colJds, const float* valJds,\n                         const int* permute, const int* jdPtr,\n                         const float* x, float* y, int m, int maxJ)\n{\n    // TODO: Launch JDS kernel with appropriate grid/block configuration\n    // dim3 block(256);\n    // int grid = max(1, (m + block.x - 1) / block.x);\n    // spmv_jds_kernel<<<grid, block>>>(colJds, valJds, permute, jdPtr, x, y, m, maxJ);\n    // cudaDeviceSynchronize();\n}\n```"}
{"type": "coding", "id": "ch14-spmv-hyb-single", "task_dir": "evaluation-tasks/ch14-spmv-hyb-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch14-spmv-hyb-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch14-spmv-hyb-single / student_kernel.cu\n#include <cuda_runtime.h>\n\nextern \"C\" __global__\nvoid spmv_ell_rows_kernel(const int* __restrict__ colEll,\n                          const float* __restrict__ valEll,\n                          const float* __restrict__ x,\n                          float* __restrict__ y,\n                          int m, int K)\n{\n    // TODO: thread-per-row (grid-stride). For each row:\n    //   sum over K slots (ignore slots with col = -1), write y[row] = sum.\n    //   y is assumed zero-initialized by the host before this kernel.\n    //   Overwrite y[row] (do NOT atomicAdd here).\n    // for(int row = blockIdx.x*blockDim.x + threadIdx.x;\n    //     row < m;\n    //     row += blockDim.x*gridDim.x) {\n    //   float s = 0.f;\n    //   int base = row * K;\n    //   for(int t=0;t<K;++t){\n    //     int c = colEll[base + t];\n    //     if(c >= 0) s += valEll[base + t] * x[c];\n    //   }\n    //   y[row] = s;\n    // }\n}\n\nextern \"C\" __global__\nvoid spmv_coo_accum_kernel(const int* __restrict__ rowCoo,\n                           const int* __restrict__ colCoo,\n                           const float* __restrict__ valCoo,\n                           const float* __restrict__ x,\n                           float* __restrict__ y,\n                           int nnzC)\n{\n    // TODO: grid-stride over nnzC and do:\n    //   atomicAdd(&y[rowCoo[k]], valCoo[k] * x[colCoo[k]]);\n    // for(int k = blockIdx.x*blockDim.x + threadIdx.x;\n    //     k < nnzC;\n    //     k += blockDim.x*gridDim.x){\n    //   atomicAdd(&y[rowCoo[k]], valCoo[k] * x[colCoo[k]]);\n    // }\n}\n\nextern \"C\" void spmv_hyb(const int* colEll, const float* valEll, int m, int K,\n                         const int* rowCoo, const int* colCoo, const float* valCoo, int nnzC,\n                         const float* x, float* y)\n{\n    // TODO:\n    //  - Launch ELL pass first (overwrite y[row]).\n    //  - Then launch COO pass (atomicAdd to y[row]).\n    // dim3 block(256);\n    // auto cdiv=[](int a,int b){return (a+b-1)/b;};\n    // int gridEll = max(1, cdiv(m, (int)block.x));\n    // int gridCoo = max(1, cdiv(nnzC, (int)block.x));\n    // spmv_ell_rows_kernel<<<gridEll, block>>>(colEll, valEll, x, y, m, K);\n    // cudaDeviceSynchronize();\n    // spmv_coo_accum_kernel<<<gridCoo, block>>>(rowCoo, colCoo, valCoo, x, y, nnzC);\n    // cudaDeviceSynchronize();\n}\n```"}
{"type": "coding", "id": "ch14-coo-to-csr-single", "task_dir": "evaluation-tasks/ch14-coo-to-csr-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch14-coo-to-csr-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch14-coo-to-csr-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <cstdio>\n\n// CONTRACT:\n// Convert an unsorted COO (row[], col[], val[]) of length nnz into CSR:\n//   rowPtr[m+1], colCSR[nnz], valCSR[nnz]\n// Requirements:\n//  - Stability: within the same row, preserve the original COO order\n//  - Handle empty rows and duplicates (do not combine duplicates)\n//  - rowPtr[m] must equal nnz\n// Strategy hint (one correct approach, not the only one):\n//  1) Device histogram of row counts with atomics -> rowCounts[m]\n//  2) Host exclusive scan on rowCounts -> rowPtr\n//  3) Stable scatter: easiest is a single-thread device kernel that walks\n//     i=0..nnz-1 in order and writes to CSR using a per-row cursor (rowNext)\n//     initialized to rowPtr. (Correctness over performance.)\n\nextern \"C\" __global__\nvoid k_hist_rows(const int* __restrict__ row, int nnz, int m, int* __restrict__ rowCounts)\n{\n    // TODO: Grid-stride loop to count entries per row\n    // for (int i = blockIdx.x * blockDim.x + threadIdx.x;\n    //      i < nnz;\n    //      i += blockDim.x * gridDim.x)\n    // {\n    //     int r = row[i];\n    //     if (0 <= r && r < m) {\n    //         atomicAdd(&rowCounts[r], 1);\n    //     }\n    // }\n}\n\nextern \"C\" __global__\nvoid k_stable_scatter_single(const int* __restrict__ row,\n                             const int* __restrict__ col,\n                             const float* __restrict__ val,\n                             int nnz, int m,\n                             int* __restrict__ rowNext,   // starts as rowPtr (device)\n                             int* __restrict__ colCSR,\n                             float* __restrict__ valCSR)\n{\n    // TODO: Single thread preserves COO order for stability\n    // One thread executes in COO input order for stability\n    // if (blockIdx.x == 0 && threadIdx.x == 0) {\n    //     for (int i = 0; i < nnz; ++i) {\n    //         int r = row[i];\n    //         if (0 <= r && r < m) {\n    //             int pos = rowNext[r]++;      // stable: increasing with input order\n    //             colCSR[pos] = col[i];\n    //             valCSR[pos] = val[i];\n    //         }\n    //     }\n    // }\n}\n\nextern \"C\" void coo_to_csr(const int* d_row, const int* d_col, const float* d_val,\n                           int nnz, int m, int /*n*/,\n                           int* d_rowPtr, int* d_colCSR, float* d_valCSR)\n{\n    // TODO: Implement COO to CSR conversion\n    // Edge cases\n    // if (m < 0 || nnz < 0) return;\n    // if (m <= 0) {\n    //     int zero = 0;\n    //     cudaMemcpy(d_rowPtr, &zero, sizeof(int), cudaMemcpyHostToDevice);\n    //     return;\n    // }\n\n    // 1) rowCounts on device\n    // int* d_rowCounts = nullptr;\n    // cudaMalloc(&d_rowCounts, m * sizeof(int));\n    // cudaMemset(d_rowCounts, 0, m * sizeof(int));\n\n    // dim3 block(256);\n    // auto cdiv = [](int a, int b){ return (a + b - 1)/b; };\n    // int gx = nnz > 0 ? 
std::max(1, cdiv(nnz, (int)block.x)) : 1;\n    // gx = std::min(gx, 65535);\n    // dim3 grid(gx);\n\n    // k_hist_rows<<<grid, block>>>(d_row, nnz, m, d_rowCounts);\n    // cudaDeviceSynchronize();\n\n    // 2) exclusive scan on host (write d_rowPtr)\n    // Download counts\n    // std::vector<int> h_counts(m);\n    // cudaMemcpy(h_counts.data(), d_rowCounts, m*sizeof(int), cudaMemcpyDeviceToHost);\n\n    // std::vector<int> h_rowPtr(m+1, 0);\n    // for (int i = 0; i < m; ++i) h_rowPtr[i+1] = h_rowPtr[i] + h_counts[i];\n\n    // Upload rowPtr\n    // cudaMemcpy(d_rowPtr, h_rowPtr.data(), (m+1)*sizeof(int), cudaMemcpyHostToDevice);\n\n    // 3) stable scatter (single-thread kernel) using rowNext = rowPtr\n    // int* d_rowNext = nullptr;\n    // cudaMalloc(&d_rowNext, m * sizeof(int));\n    // cudaMemcpy(d_rowNext, h_rowPtr.data(), m*sizeof(int), cudaMemcpyHostToDevice);\n\n    // k_stable_scatter_single<<<1, 1>>>(d_row, d_col, d_val, nnz, m, d_rowNext, d_colCSR, d_valCSR);\n    // cudaDeviceSynchronize();\n\n    // cudaFree(d_rowCounts);\n    // cudaFree(d_rowNext);\n}\n```"}
{"type": "coding", "id": "ch15-bfs-push-single", "task_dir": "evaluation-tasks/ch15-bfs-push-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch15-bfs-push-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch15-bfs-push-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <limits.h>\n\n#ifndef INF_LVL\n#define INF_LVL 0x3f3f3f3f\n#endif\n\n// CONTRACT (Push BFS):\n// - Input graph in CSR on device: d_row_ptr[V+1], d_col_idx[E]\n// - Source vertex 'src' in [0, V)\n// - Must compute levels: d_level[v] = BFS distance from src, or INF_LVL if unreachable\n// - Use vertex-centric PUSH per frontier level:\n//     For each u in frontier, for neighbors v: if (level[v]==INF) then\n//       level[v] = cur_level+1  (must be atomic to ensure single-writer)\n//       append v to next_frontier\n// - Terminate when frontier_size becomes 0\n// - Do NOT modify CSR arrays. Only write to d_level and internal frontier buffers.\n//\n// You may choose blockDim=256 and compute grid from frontier_size.\n\n__global__ void _init_levels(int* __restrict__ level, int V, int src){\n    // TODO: Initialize all levels to INF_LVL\n    // int i = blockIdx.x * blockDim.x + threadIdx.x;\n    // if (i < V) level[i] = INF_LVL;\n    // if (i == 0 && src >= 0 && src < V) {\n    //     // src will be set to 0 by caller or here, either is fine\n    // }\n}\n\n__global__ void bfs_push_kernel(const int* __restrict__ row_ptr,\n                                const int* __restrict__ col_idx,\n                                const int* __restrict__ frontier,\n                                int frontier_size,\n                                int* __restrict__ next_frontier,\n                                int* __restrict__ next_frontier_size,\n                                int* __restrict__ level,\n                                int cur_level)\n{\n    // TODO: Implement push-based BFS kernel\n    // int t = blockIdx.x * blockDim.x + threadIdx.x;\n    // if (t >= frontier_size) return;\n\n    // int u = frontier[t];\n    // int beg = row_ptr[u];\n    // int end = row_ptr[u+1];\n    // for (int e = beg; e < end; ++e) {\n    //     int v = col_idx[e];\n    //     // discover v once\n    //     if (atomicCAS(&level[v], INF_LVL, cur_level + 1) == INF_LVL) {\n    //         int pos = atomicAdd(next_frontier_size, 1);\n    //         next_frontier[pos] = v;\n    //     }\n    // }\n}\n\nextern \"C\" void bfs_push_gpu(const int* d_row_ptr,\n                             const int* d_col_idx,\n                             int V, int E,\n                             int src,\n                             int* d_level)\n{\n    // TODO: Implement complete BFS push algorithm\n    // if (V <= 0) return;\n\n    // // 1) initialize levels\n    // dim3 b(256), g((V + b.x - 1)/b.x);\n    // _init_levels<<<g,b>>>(d_level, V, src);\n    // cudaDeviceSynchronize();\n\n    // // set src level = 0\n    // int zero = 0;\n    // cudaMemcpy(d_level + src, &zero, sizeof(int), cudaMemcpyHostToDevice);\n\n    // // 2) allocate frontiers (length <= V)\n    // int *d_frontier = nullptr, *d_next_frontier = nullptr;\n    // int *d_next_size = nullptr;\n    // cudaMalloc(&d_frontier,      V * sizeof(int));\n    // cudaMalloc(&d_next_frontier, V * sizeof(int));\n    // cudaMalloc(&d_next_size,     sizeof(int));\n\n    // // start frontier = {src}\n    // 
cudaMemcpy(d_frontier, &src, sizeof(int), cudaMemcpyHostToDevice);\n\n    // int h_frontier_size = 1;\n    // int cur_level = 0;\n\n    // while (h_frontier_size > 0) {\n    //     cudaMemset(d_next_size, 0, sizeof(int));\n\n    //     dim3 kb(256), kg((h_frontier_size + kb.x - 1) / kb.x);\n    //     bfs_push_kernel<<<kg, kb>>>(d_row_ptr, d_col_idx,\n    //                                 d_frontier, h_frontier_size,\n    //                                 d_next_frontier, d_next_size,\n    //                                 d_level, cur_level);\n    //     cudaDeviceSynchronize();\n\n    //     // next size\n    //     int h_next = 0;\n    //     cudaMemcpy(&h_next, d_next_size, sizeof(int), cudaMemcpyDeviceToHost);\n\n    //     // swap\n    //     std::swap(d_frontier, d_next_frontier);\n    //     h_frontier_size = h_next;\n    //     ++cur_level;\n    // }\n\n    // cudaFree(d_frontier);\n    // cudaFree(d_next_frontier);\n    // cudaFree(d_next_size);\n}\n```"}
{"type": "coding", "id": "ch15-bfs-pull-single", "task_dir": "evaluation-tasks/ch15-bfs-pull-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch15-bfs-pull-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch15-bfs-pull-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <limits.h>\n\n#ifndef INF_LVL\n#define INF_LVL 0x3f3f3f3f\n#endif\n\n// CONTRACT (Pull BFS):\n// - Input CSR on device: d_row_ptr[V+1], d_col_idx[E]\n// - Maintain two frontier bitmaps on device: in_frontier[v], out_frontier[v]\n// - Initialization: level[:] = INF_LVL; level[src] = 0; in_frontier[src]=1\n// - Per level: for each vertex v with level[v]==INF, scan neighbors u;\n//     if any u in in_frontier, then level[v]=cur_level+1; out_frontier[v]=1; atomicAdd(next_count,1)\n// - Terminate when next_count==0\n// - Do NOT modify CSR\n\n__global__ void _init_levels_pull(int* __restrict__ level, int V, int src){\n    // TODO: Initialize all levels to INF_LVL\n    // int i = blockIdx.x * blockDim.x + threadIdx.x;\n    // if (i < V) level[i] = INF_LVL;\n    // if (i == 0 && src>=0 && src<V) { /* src set below */ }\n}\n\n__global__ void _clear_bitmap(unsigned char* __restrict__ bm, int V){\n    // TODO: Clear bitmap to all zeros\n    // int i = blockIdx.x * blockDim.x + threadIdx.x;\n    // if (i < V) bm[i] = 0;\n}\n\n__global__ void _set_single(unsigned char* __restrict__ bm, int idx){\n    // TODO: Set single bit in bitmap\n    // if (threadIdx.x==0 && blockIdx.x==0) bm[idx] = 1;\n}\n\n__global__ void bfs_pull_kernel(const int* __restrict__ row_ptr,\n                                const int* __restrict__ col_idx,\n                                const unsigned char* __restrict__ in_frontier,\n                                unsigned char* __restrict__ out_frontier,\n                                int* __restrict__ level,\n                                int cur_level,\n                                int V,\n                                int* __restrict__ next_count)\n{\n    // TODO: Implement pull-based BFS kernel\n    // int v = blockIdx.x * blockDim.x + threadIdx.x;\n    // if (v >= V) return;\n\n    // if (level[v] != INF_LVL) return; // already discovered\n\n    // int beg = row_ptr[v];\n    // int end = row_ptr[v+1];\n\n    // // pull: find ANY neighbor u in current frontier\n    // for (int e = beg; e < end; ++e) {\n    //     int u = col_idx[e];\n    //     if (in_frontier[u]) {\n    //         level[v] = cur_level + 1;   // single-writer (this thread)\n    //         out_frontier[v] = 1;\n    //         atomicAdd(next_count, 1);\n    //         break;\n    //     }\n    // }\n}\n\nextern \"C\" void bfs_pull_gpu(const int* d_row_ptr,\n                             const int* d_col_idx,\n                             int V, int E,\n                             int src,\n                             int* d_level)\n{\n    // TODO: Implement complete BFS pull algorithm\n    // if (V <= 0) return;\n\n    // dim3 b(256), g((V + b.x - 1)/b.x);\n\n    // // levels\n    // _init_levels_pull<<<g,b>>>(d_level, V, src);\n    // cudaDeviceSynchronize();\n    // int zero = 0;\n    // cudaMemcpy(d_level + src, &zero, sizeof(int), cudaMemcpyHostToDevice);\n\n    // // bitmaps\n    // unsigned char *d_in = nullptr, *d_out = nullptr;\n    // cudaMalloc(&d_in,  V*sizeof(unsigned char));\n    // cudaMalloc(&d_out, 
V*sizeof(unsigned char));\n    // _clear_bitmap<<<g,b>>>(d_in, V);\n    // _clear_bitmap<<<g,b>>>(d_out, V);\n    // _set_single<<<1,1>>>(d_in, src);\n\n    // int *d_next_count = nullptr;\n    // cudaMalloc(&d_next_count, sizeof(int));\n\n    // int cur_level = 0;\n    // while (true) {\n    //     cudaMemset(d_next_count, 0, sizeof(int));\n    //     _clear_bitmap<<<g,b>>>(d_out, V);\n\n    //     bfs_pull_kernel<<<g,b>>>(d_row_ptr, d_col_idx,\n    //                              d_in, d_out,\n    //                              d_level, cur_level, V,\n    //                              d_next_count);\n    //     cudaDeviceSynchronize();\n\n    //     int h_next = 0;\n    //     cudaMemcpy(&h_next, d_next_count, sizeof(int), cudaMemcpyDeviceToHost);\n    //     if (h_next == 0) break;\n\n    //     std::swap(d_in, d_out);\n    //     ++cur_level;\n    // }\n\n    // cudaFree(d_in); cudaFree(d_out); cudaFree(d_next_count);\n}\n```"}
{"type": "coding", "id": "ch15-bfs-direction-optimized-single", "task_dir": "evaluation-tasks/ch15-bfs-direction-optimized-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch15-bfs-direction-optimized-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch15-bfs-direction-optimized-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <limits.h>\n\n#ifndef INF_LVL\n#define INF_LVL 0x3f3f3f3f\n#endif\n\n// CONTRACT (Direction-Optimized BFS):\n// - Device CSR inputs: d_row_ptr[V+1], d_col_idx[E] (MUST remain unchanged)\n// - Output: d_level[V] = BFS distance from src, or INF_LVL if unreachable\n// - Start in PUSH mode. If frontier gets \"large\", switch to PULL mode.\n//   If frontier later gets \"small\", switch back to PUSH.\n// - Heuristic (simple, deterministic, OK for eval):\n//      switch_to_pull   when frontier_size > V/16\n//      switch_to_push   when frontier_size < V/64\n// - PUSH: for each u in frontier, scan neighbors v; if level[v]==INF -> set & enqueue\n//   (use atomicCAS on level[v] and atomicAdd for enqueue)\n// - PULL: for each undiscovered v, scan neighbors u; if u is in current frontier (bitmap) -> set & enqueue\n//   (use an in_frontier bitmap on device + atomicAdd for enqueue)\n// - Manage frontiers as arrays; when entering PULL, also fill the in_frontier bitmap from the array.\n\n__global__ void _init_levels(int* __restrict__ level, int V){\n    // TODO: Initialize all levels to INF_LVL\n    // int i = blockIdx.x * blockDim.x + threadIdx.x;\n    // if (i < V) level[i] = INF_LVL;\n}\n\n__global__ void _clear_bitmap(unsigned char* __restrict__ bm, int V){\n    // TODO: Clear bitmap to all zeros\n    // int i = blockIdx.x * blockDim.x + threadIdx.x;\n    // if (i < V) bm[i] = 0;\n}\n\n__global__ void _mark_bitmap_from_list(const int* __restrict__ list, int n,\n                                       unsigned char* __restrict__ bm){\n    // TODO: Mark bitmap positions from frontier list\n    // int i = blockIdx.x * blockDim.x + threadIdx.x;\n    // if (i < n) {\n    //     int v = list[i];\n    //     bm[v] = 1;\n    // }\n}\n\n__global__ void k_push(const int* __restrict__ row_ptr,\n                       const int* __restrict__ col_idx,\n                       const int* __restrict__ frontier,\n                       int frontier_size,\n                       int* __restrict__ next_frontier,\n                       int* __restrict__ next_size,\n                       int* __restrict__ level,\n                       int cur_level)\n{\n    // TODO: Implement push kernel\n    // int t = blockIdx.x * blockDim.x + threadIdx.x;\n    // if (t >= frontier_size) return;\n\n    // int u = frontier[t];\n    // int beg = row_ptr[u], end = row_ptr[u+1];\n    // for (int e = beg; e < end; ++e) {\n    //     int v = col_idx[e];\n    //     if (atomicCAS(&level[v], INF_LVL, cur_level + 1) == INF_LVL) {\n    //         int pos = atomicAdd(next_size, 1);\n    //         next_frontier[pos] = v;\n    //     }\n    // }\n}\n\n__global__ void k_pull(const int* __restrict__ row_ptr,\n                       const int* __restrict__ col_idx,\n                       const unsigned char* __restrict__ in_frontier,\n                       int* __restrict__ next_frontier,\n                       int* __restrict__ next_size,\n                       int* __restrict__ level,\n                       
int cur_level,\n                       int V)\n{\n    // TODO: Implement pull kernel\n    // int v = blockIdx.x * blockDim.x + threadIdx.x;\n    // if (v >= V) return;\n    // if (level[v] != INF_LVL) return;\n\n    // int beg = row_ptr[v], end = row_ptr[v+1];\n    // for (int e = beg; e < end; ++e) {\n    //     int u = col_idx[e];\n    //     if (in_frontier[u]) {\n    //         level[v] = cur_level + 1;          // single-writer (this thread)\n    //         int pos = atomicAdd(next_size, 1); // also build array frontier\n    //         next_frontier[pos] = v;\n    //         break;\n    //     }\n    // }\n}\n\nextern \"C\" void bfs_direction_optimized_gpu(const int* d_row_ptr,\n                                            const int* d_col_idx,\n                                            int V, int E,\n                                            int src,\n                                            int* d_level)\n{\n    // TODO: Implement complete direction-optimized BFS\n    // if (V <= 0) return;\n\n    // dim3 b(256), g((V + b.x - 1)/b.x);\n\n    // // levels\n    // _init_levels<<<g,b>>>(d_level, V);\n    // cudaDeviceSynchronize();\n    // int zero = 0;\n    // cudaMemcpy(d_level + src, &zero, sizeof(int), cudaMemcpyHostToDevice);\n\n    // // frontier arrays + counters\n    // int *d_frontier=nullptr, *d_next_frontier=nullptr;\n    // int *d_next_size=nullptr;\n    // cudaMalloc(&d_frontier,      V*sizeof(int));\n    // cudaMalloc(&d_next_frontier, V*sizeof(int));\n    // cudaMalloc(&d_next_size,     sizeof(int));\n\n    // // frontier bitmap for pull\n    // unsigned char *d_in_bm=nullptr;\n    // cudaMalloc(&d_in_bm, V*sizeof(unsigned char));\n    // _clear_bitmap<<<g,b>>>(d_in_bm, V);\n\n    // // init frontier = {src}\n    // cudaMemcpy(d_frontier, &src, sizeof(int), cudaMemcpyHostToDevice);\n    // int h_frontier_size = 1;\n    // bool mode_is_pull = false;\n    // int cur_level = 0;\n\n    // while (h_frontier_size > 0) {\n    //     cudaMemset(d_next_size, 0, sizeof(int));\n\n    //     if (!mode_is_pull) {\n    //         // PUSH step\n    //         dim3 kb(256), kg((h_frontier_size + kb.x - 1)/kb.x);\n    //         k_push<<<kg,kb>>>(d_row_ptr, d_col_idx,\n    //                           d_frontier, h_frontier_size,\n    //                           d_next_frontier, d_next_size,\n    //                           d_level, cur_level);\n    //         cudaDeviceSynchronize();\n    //     } else {\n    //         // PULL step: ensure bitmap matches current frontier\n    //         _clear_bitmap<<<g,b>>>(d_in_bm, V);\n    //         dim3 mb(256), mg((h_frontier_size + mb.x - 1)/mb.x);\n    //         _mark_bitmap_from_list<<<mg,mb>>>(d_frontier, h_frontier_size, d_in_bm);\n    //         cudaDeviceSynchronize();\n\n    //         // pull kernel (over all vertices)\n    //         k_pull<<<g,b>>>(d_row_ptr, d_col_idx,\n    //                         d_in_bm,\n    //                         d_next_frontier, d_next_size,\n    //                         d_level, cur_level, V);\n    //         cudaDeviceSynchronize();\n    //     }\n\n    //     // next frontier size\n    //     int h_next = 0;\n    //     cudaMemcpy(&h_next, d_next_size, sizeof(int), cudaMemcpyDeviceToHost);\n\n    //     // Heuristic switching\n    //     if (!mode_is_pull && h_next > V/16) {\n    //         mode_is_pull = true;\n    //     } else if (mode_is_pull && h_next < V/64) {\n    //         mode_is_pull = false;\n    //     }\n\n    //     // swap for next iteration\n    //     std::swap(d_frontier, 
d_next_frontier);\n    //     h_frontier_size = h_next;\n    //     ++cur_level;\n    // }\n\n    // cudaFree(d_frontier);\n    // cudaFree(d_next_frontier);\n    // cudaFree(d_next_size);\n    // cudaFree(d_in_bm);\n}\n```"}
{"type": "coding", "id": "ch15-bfs-edge-centric-single", "task_dir": "evaluation-tasks/ch15-bfs-edge-centric-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch15-bfs-edge-centric-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch15-bfs-edge-centric-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <limits.h>\n\n#ifndef INF_LVL\n#define INF_LVL 0x3f3f3f3f\n#endif\n\n// CONTRACT (Edge-Centric BFS):\n// - Input CSR on device: d_row_ptr[V+1], d_col_idx[E] (MUST remain unchanged)\n// - Output: d_level[V] = BFS distance from src, or INF_LVL if unreachable\n// - Use EDGE-CENTRIC parallelism: one thread per edge per iteration\n// - For each level: parallel over ALL edges; check if edge (u,v) crosses frontier\n//   i.e., level[u] == cur_level && level[v] == INF_LVL\n//   If so: level[v] = cur_level+1; active_found = true\n// - Terminate when no new vertices are discovered in an iteration\n// - Global atomicMax to signal if any work was done this iteration\n\n__global__ void _init_levels_edge(int* __restrict__ level, int V, int src){\n    // TODO: Initialize all levels to INF_LVL, except src = 0\n    // int i = blockIdx.x * blockDim.x + threadIdx.x;\n    // if (i < V) level[i] = INF_LVL;\n    // if (i == 0 && src >= 0 && src < V) {\n    //     level[src] = 0;\n    // }\n}\n\n__global__ void bfs_edge_centric_kernel(const int* __restrict__ row_ptr,\n                                        const int* __restrict__ col_idx,\n                                        int* __restrict__ level,\n                                        int cur_level,\n                                        int E,\n                                        int* __restrict__ active_found)\n{\n    // TODO: Implement edge-centric BFS kernel\n    // int edge = blockIdx.x * blockDim.x + threadIdx.x;\n    // if (edge >= E) return;\n\n    // // Find source vertex u for this edge\n    // // Binary search or linear scan to find u such that row_ptr[u] <= edge < row_ptr[u+1]\n    // // For simplicity, we'll use a linear approach that works but isn't optimal\n    // // In practice, you might precompute edge-to-vertex mapping\n\n    // // Find the source vertex for this edge using binary search\n    // // This is simplified - in practice you'd want more efficient edge->vertex mapping\n    // int u = 0;\n    // // Simple linear scan to find source vertex (not optimal but correct)\n    // while (u < V && row_ptr[u+1] <= edge) u++;\n    // if (u >= V || edge < row_ptr[u]) return; // Invalid edge\n\n    // int v = col_idx[edge];\n\n    // // Check if this edge crosses the frontier\n    // if (level[u] == cur_level && level[v] == INF_LVL) {\n    //     level[v] = cur_level + 1;\n    //     atomicMax(active_found, 1); // Signal that work was done\n    // }\n}\n\nextern \"C\" void bfs_edge_centric_gpu(const int* d_row_ptr,\n                                     const int* d_col_idx,\n                                     int V, int E,\n                                     int src,\n                                     int* d_level)\n{\n    // TODO: Implement complete edge-centric BFS\n    // if (V <= 0 || E <= 0) return;\n\n    // dim3 b(256), g((V + b.x - 1)/b.x);\n\n    // // Initialize levels\n    // _init_levels_edge<<<g,b>>>(d_level, V, src);\n    // cudaDeviceSynchronize();\n\n    // // Edge-centric requires one thread per edge\n   
 // dim3 eb(256), eg((E + eb.x - 1)/eb.x);\n\n    // int *d_active = nullptr;\n    // cudaMalloc(&d_active, sizeof(int));\n\n    // int cur_level = 0;\n    // while (true) {\n    //     cudaMemset(d_active, 0, sizeof(int));\n\n    //     bfs_edge_centric_kernel<<<eg,eb>>>(d_row_ptr, d_col_idx,\n    //                                        d_level, cur_level, E,\n    //                                        d_active);\n    //     cudaDeviceSynchronize();\n\n    //     int h_active = 0;\n    //     cudaMemcpy(&h_active, d_active, sizeof(int), cudaMemcpyDeviceToHost);\n    //     if (h_active == 0) break;\n\n    //     ++cur_level;\n    // }\n\n    // cudaFree(d_active);\n}\n```"}
{"type": "coding", "id": "ch16-conv2d-forward-single", "task_dir": "evaluation-tasks/ch16-conv2d-forward-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch16-conv2d-forward-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch16-conv2d-forward-single / student_kernel.cu\n#include <cuda_runtime.h>\n\n// CONTRACT (Conv2D Forward):\n// NCHW input; out shape = N x OC x out_h x out_w\n// No padding. Stride (stride_h, stride_w). Kernel (kernel_h, kernel_w).\n// Bias optional (bias==nullptr treated as zeros).\n//\n// You must implement: each thread computes exactly one output element (n, oc, oh, ow).\n// Index decode is from flat tid in [0, N*OC*out_h*out_w).\n// Guard against bounds, read-only inputs, write-only outputs.\n\nextern \"C\" __global__\nvoid conv2d_forward_kernel(\n    const float* __restrict__ input,      // [N,C,H,W]\n    const float* __restrict__ weight,     // [OC,C,KH,KW]\n    const float* __restrict__ bias,       // [OC] or nullptr\n    float* __restrict__ output,           // [N,OC,OH,OW]\n    int N, int C, int H, int W,\n    int OC, int kernel_h, int kernel_w,\n    int stride_h, int stride_w,\n    int out_h, int out_w)\n{\n    // TODO: Implement Conv2D forward kernel\n    // const int tid = blockIdx.x * blockDim.x + threadIdx.x;\n    // const long long total = 1LL * N * OC * out_h * out_w;\n    // if ((long long)tid >= total) return;\n\n    // // Decode flat -> (n, oc, oh, ow) in N, OC, OH, OW order\n    // int t = tid;\n    // const int ow = t % out_w;  t /= out_w;\n    // const int oh = t % out_h;  t /= out_h;\n    // const int oc = t % OC;     t /= OC;\n    // const int n  = t;\n\n    // const int ih0 = oh * stride_h;\n    // const int iw0 = ow * stride_w;\n\n    // // Base pointers for indexing\n    // // input:  ((n*C + c)*H + ih)*W + iw\n    // // weight: (((oc*C + c)*KH + kh)*KW + kw)\n    // float acc = (bias ? bias[oc] : 0.0f);\n\n    // for (int c = 0; c < C; ++c) {\n    //     for (int kh = 0; kh < kernel_h; ++kh) {\n    //         const int ih = ih0 + kh;\n    //         if (ih >= H) continue;\n    //         for (int kw = 0; kw < kernel_w; ++kw) {\n    //             const int iw = iw0 + kw;\n    //             if (iw >= W) continue;\n\n    //             const long long in_idx =\n    //                 (((long long)n * C + c) * H + ih) * W + iw;\n    //             const long long w_idx =\n    //                 ((((long long)oc * C + c) * kernel_h + kh) * kernel_w + kw);\n\n    //             acc += input[in_idx] * weight[w_idx];\n    //         }\n    //     }\n    // }\n\n    // const long long out_idx =\n    //     (((long long)n * OC + oc) * out_h + oh) * out_w + ow;\n    // output[out_idx] = acc;\n}\n```"}
{"type": "coding", "id": "ch16-maxpool2d-forward-single", "task_dir": "evaluation-tasks/ch16-maxpool2d-forward-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch16-maxpool2d-forward-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch16-maxpool2d-forward-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <float.h>\n\n// CONTRACT (MaxPool2D Forward):\n// input:  [N,C,H,W] NCHW\n// output: [N,C,OH,OW], where OH=(H-KH)/SH+1, OW=(W-KW)/SW+1\n// indices: [N,C,OH,OW] stores argmax *linear index within KH x KW window,\n//          or -1 if no valid (shouldn't happen with valid windows).\n// No padding. Stride (stride_h, stride_w). Kernel (kernel_h, kernel_w).\n//\n// Implement one-thread-per-output element.\n\nextern \"C\" __global__\nvoid maxpool2d_forward_kernel(const float* input, float* output, int* indices,\n                              int batch_size, int channels,\n                              int height, int width,\n                              int kernel_h, int kernel_w,\n                              int stride_h, int stride_w,\n                              int out_h, int out_w)\n{\n    // TODO: Implement MaxPool2D forward kernel\n    // const int tid = blockIdx.x * blockDim.x + threadIdx.x;\n    // const long long total = 1LL*batch_size*channels*out_h*out_w;\n    // if((long long)tid >= total) return;\n\n    // int t = tid;\n    // const int ow = t % out_w;    t /= out_w;\n    // const int oh = t % out_h;    t /= out_h;\n    // const int c  = t % channels; t /= channels;\n    // const int n  = t;\n\n    // const int ih0 = oh * stride_h;\n    // const int iw0 = ow * stride_w;\n\n    // // Base pointers\n    // // input idx: ((n*C + c)*H + ih)*W + iw\n    // float best = -FLT_MAX;\n    // int best_idx = -1;\n\n    // for(int kh=0; kh<kernel_h; ++kh){\n    //     int ih = ih0 + kh; if(ih>=height) continue;\n    //     for(int kw=0; kw<kernel_w; ++kw){\n    //         int iw = iw0 + kw; if(iw>=width) continue;\n    //         const long long idx = (((long long)n*channels + c)*height + ih)*width + iw;\n    //         float v = input[idx];\n    //         const int local_idx = kh*kernel_w + kw;\n    //         if(v > best){ best = v; best_idx = local_idx; }\n    //     }\n    // }\n\n    // const long long out_idx = (((long long)n*channels + c)*out_h + oh)*out_w + ow;\n    // output[out_idx]  = best;\n    // indices[out_idx] = best_idx;\n}\n```"}
{"type": "coding", "id": "ch17-fhd-accumulate-single", "task_dir": "evaluation-tasks/ch17-fhd-accumulate-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch17-fhd-accumulate-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch17-fhd-accumulate-single/student_kernel.cu\n#include <cuda_runtime.h>\n#include <math_constants.h>\n\nextern \"C\" __global__\nvoid fhd_accumulate_kernel(const float* __restrict__ rPhi,\n                           const float* __restrict__ iPhi,\n                           const float* __restrict__ rD,\n                           const float* __restrict__ iD,\n                           const float* __restrict__ kx,\n                           const float* __restrict__ ky,\n                           const float* __restrict__ kz,\n                           const float* __restrict__ x,\n                           const float* __restrict__ y,\n                           const float* __restrict__ z,\n                           int M, int N,\n                           float* __restrict__ rFhD,\n                           float* __restrict__ iFhD)\n{\n    // TODO:\n    // - 1 thread per n (global id = blockIdx.x*blockDim.x + threadIdx.x)\n    // - Guard: if (n >= N) return;\n    // - Load x[n], y[n], z[n] into registers\n    // - Accumulate over m=0..M-1:\n    //      rmu = rPhi[m]*rD[m] + iPhi[m]*iD[m]\n    //      imu = rPhi[m]*iD[m] - iPhi[m]*rD[m]\n    //      ang = 2*pi*(kx[m]*xn + ky[m]*yn + kz[m]*zn)\n    //      c = cosf(ang); s = sinf(ang)\n    //      r_acc += rmu*c - imu*s\n    //      i_acc += imu*c + rmu*s\n    // - Finally: rFhD[n] += r_acc; iFhD[n] += i_acc;\n\n    // TODO: Implement the FHD accumulation kernel here\n}\n```"}
{"type": "coding", "id": "ch17-fhd-fission-two-kernels-single", "task_dir": "evaluation-tasks/ch17-fhd-fission-two-kernels-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch17-fhd-fission-two-kernels-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch17-fhd-fission-two-kernels-single/student_kernel.cu\n#include <cuda_runtime.h>\n\n// TODO (A): compute_mu_kernel\n// Each thread handles one m (if m < M):\n//   rMu[m] = rPhi[m]*rD[m] + iPhi[m]*iD[m]\n//   iMu[m] = rPhi[m]*iD[m] - iPhi[m]*rD[m]\nextern \"C\" __global__\nvoid compute_mu_kernel(const float* __restrict__ rPhi,\n                       const float* __restrict__ iPhi,\n                       const float* __restrict__ rD,\n                       const float* __restrict__ iD,\n                       int M,\n                       float* __restrict__ rMu,\n                       float* __restrict__ iMu)\n{\n    // TODO: Implement complex multiplication for each m\n    // Hint: int m = blockIdx.x * blockDim.x + threadIdx.x;\n}\n\n// TODO (B): fhd_accumulate_mu_kernel\n// One thread per n; loop over m using precomputed rMu/iMu.\n// Accumulate into rFhD[n], iFhD[n] exactly as in fused version.\nextern \"C\" __global__\nvoid fhd_accumulate_mu_kernel(const float* __restrict__ rMu,\n                              const float* __restrict__ iMu,\n                              const float* __restrict__ kx,\n                              const float* __restrict__ ky,\n                              const float* __restrict__ kz,\n                              const float* __restrict__ x,\n                              const float* __restrict__ y,\n                              const float* __restrict__ z,\n                              int M, int N,\n                              float* __restrict__ rFhD,\n                              float* __restrict__ iFhD)\n{\n    // TODO: Implement accumulation using precomputed rMu, iMu\n    // Similar to ch17-fhd-accumulate-single but use rMu[m], iMu[m] directly\n}\n```"}
{"type": "coding", "id": "ch18-energy-scatter-single", "task_dir": "evaluation-tasks/ch18-energy-scatter-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch18-energy-scatter-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch18-energy-scatter-single / student_kernel.cu\n//\n// Implement Fig. 18.5 (SCATTER): one thread per atom, looping over all (i,j)\n// grid points on a fixed z-slice and ATOMICALLY accumulating into energygrid.\n//\n// CONTRACT\n// - Constant memory holds a *chunk* of atoms: __constant__ float atoms[CHUNK_SIZE*4]\n//   as (x,y,z,charge) AoS, 4 floats per atom.\n// - The test harness uploads atoms chunk-by-chunk via cudaMemcpyToSymbol and then\n//   launches the kernel once per chunk, accumulating into the same output slice.\n// - Kernel params:\n//     energygrid    : pointer to [grid.x * grid.y * grid.z] floats\n//     grid          : logical 3D grid dimensions (x,y,z) for indexing\n//     gridspacing   : spacing used to compute x = i*gridspacing, y = j*gridspacing\n//     z             : world-space z coordinate of the slice (must be multiple of gridspacing)\n//     atoms_in_chunk: number of atoms loaded in constant memory for this launch (<= CHUNK_SIZE)\n//     start_atom    : global offset of first atom of the chunk (not needed for correct math;\n//                     included so your signature matches reference; you may ignore it)\n// - Must use atomicAdd on energygrid writes (scatter means threads collide on same cell).\n// - Bounds: guard i in [0,grid.x), j in [0,grid.y).\n//\n// HINTS\n// - Compute k = int(z / gridspacing) once per kernel; assume z aligns.\n// - Thread id tid covers one atom in [0, atoms_in_chunk).\n// - Load (ax,ay,az,q) from constant memory as atoms[4*tid + {0,1,2,3}].\n// - For each j row, precompute y and (y-ay), (z-az) and reuse (dy*dy + dz*dz).\n// - Use sqrtf for single-precision.\n// - Use 1D blocks and grids for atom threads (e.g., blockDim.x = 256).\n//\n// PERFORMANCE is not graded here—correctness, safety (no OOB), and atomicity are.\n\n#ifndef CHUNK_SIZE\n#define CHUNK_SIZE 256\n#endif\n\n__constant__ float atoms[CHUNK_SIZE * 4];\n\nextern \"C\" __global__\nvoid cenergyScatterKernel(float* __restrict__ energygrid,\n                          dim3 grid,\n                          float gridspacing,\n                          float z,\n                          int atoms_in_chunk,\n                          int /*start_atom_unused*/) {\n    // TODO: Implement scatter kernel (one thread per atom)\n    // 1. Get thread ID and bounds check\n    // 2. Load atom data from constant memory\n    // 3. Compute z-slice index k\n    // 4. Loop over all (i,j) grid points in the slice\n    // 5. For each grid point, compute distance and contribution\n    // 6. Use atomicAdd to accumulate into energygrid\n}\n```"}
{"type": "coding", "id": "ch18-energy-gather-single", "task_dir": "evaluation-tasks/ch18-energy-gather-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch18-energy-gather-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch18-energy-gather-single / student_kernel.cu\n//\n// Implement Fig. 18.6 (GATHER): one thread per grid point (on a fixed z-slice).\n// Each thread loops over all atoms in the current constant-memory chunk and\n// accumulates a private sum, then writes exactly once to energygrid (+=).\n//\n// CONTRACT\n// - Constant memory holds a *chunk* of atoms: __constant__ float atoms[CHUNK_SIZE*4].\n// - Kernel params:\n//     energygrid    : pointer to [grid.x * grid.y * grid.z] floats\n//     grid          : logical 3D grid dimensions (x,y,z) for indexing\n//     gridspacing   : spacing for x=i*h, y=j*h\n//     z             : world-space z of the slice\n//     atoms_in_chunk: number of atoms currently loaded (<= CHUNK_SIZE)\n//     start_atom    : global offset of first atom in the chunk (not needed here)\n// - NO atomics: each thread owns its output cell and does `energygrid[idx] += local_sum`.\n// - 2D launch is expected (e.g., block=(16,16)).\n//\n// HINTS\n// - Compute (dy*dy + dz*dz) per row and reuse where reasonable.\n// - Use sqrtf and a small denominator clamp to avoid division by zero.\n\n#ifndef CHUNK_SIZE\n#define CHUNK_SIZE 256\n#endif\n\n__constant__ float atoms[CHUNK_SIZE * 4];\n\nextern \"C\" __global__\nvoid cenergyGatherKernel(float* __restrict__ energygrid,\n                         dim3 grid,\n                         float gridspacing,\n                         float z,\n                         int atoms_in_chunk,\n                         int /*start_atom_unused*/) {\n    // TODO: Implement gather kernel (one thread per grid cell)\n    // 1. Get 2D thread indices (i,j) for grid position\n    // 2. Bounds check against grid dimensions\n    // 3. Compute world-space coordinates (x,y,z)\n    // 4. Loop over all atoms in the current chunk\n    // 5. For each atom, compute distance and contribution\n    // 6. Accumulate private sum, then write once: energygrid[idx] += sum\n}\n```"}
{"type": "coding", "id": "ch18-energy-gather-coarsened-single", "task_dir": "evaluation-tasks/ch18-energy-gather-coarsened-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch18-energy-gather-coarsened-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch18-energy-gather-coarsened-single / student_kernel.cu\n//\n// TODO: Implement a GATHER kernel with THREAD COARSENING across X (cf. Fig. 18.8).\n//\n// One thread computes COARSEN_FACTOR output cells along +X on the same row j.\n// No atomics: each output cell is owned by exactly one thread.\n//\n// CONSTANT MEMORY LAYOUT\n//   __constant__ float atoms[CHUNK_SIZE*4]; // AoS: x,y,z,q (4 floats per atom)\n//\n// KERNEL PARAMS\n//   energygrid     : [grid.x * grid.y * grid.z] output array\n//   grid           : logical grid dims (x,y,z)\n//   gridspacing    : world-space spacing\n//   z              : world z for the slice (assumed multiple of gridspacing)\n//   atoms_in_chunk : number of atoms currently in constant memory (<= CHUNK_SIZE)\n//   start_atom     : global offset of the first atom in the chunk (not needed here)\n//\n// COARSENING SHAPE\n//   base_i = blockIdx.x * (blockDim.x * COARSEN_FACTOR) + threadIdx.x * COARSEN_FACTOR\n//   j      = blockIdx.y * blockDim.y + threadIdx.y\n//   Each thread computes cells i = base_i + c, for c in [0, COARSEN_FACTOR).\n//\n// REQUIREMENTS\n// - Use sqrtf; clamp denom with fmaxf(denom, 1e-12f) to avoid div-by-zero.\n// - Bounds check i and j.\n// - Single write per output cell: energygrid[idx] += energy_c.\n// - Do not use atomics.\n//\n// HINTS\n// - Precompute dy and dz once per atom per row (reuse dy*dy + dz*dz).\n// - Use a small local array energies[COARSEN_FACTOR] to accumulate.\n\n#ifndef CHUNK_SIZE\n#define CHUNK_SIZE 256\n#endif\n#ifndef COARSEN_FACTOR\n#define COARSEN_FACTOR 8\n#endif\n\n__constant__ float atoms[CHUNK_SIZE * 4];\n\nextern \"C\" __global__\nvoid cenergyCoarsenKernel(float* __restrict__ energygrid,\n                          dim3 grid,\n                          float gridspacing,\n                          float z,\n                          int atoms_in_chunk,\n                          int /*start_atom_unused*/) {\n    int base_i = blockIdx.x * (blockDim.x * COARSEN_FACTOR) + threadIdx.x * COARSEN_FACTOR;\n    int j      = blockIdx.y * blockDim.y + threadIdx.y;\n\n    if (j < 0 || j >= (int)grid.y) return;\n\n    int k = int(z / gridspacing);\n    if (k < 0 || k >= (int)grid.z) return;\n\n    float energies[COARSEN_FACTOR];\n    #pragma unroll\n    for (int c = 0; c < COARSEN_FACTOR; ++c) energies[c] = 0.0f;\n\n    for (int a = 0; a < atoms_in_chunk; ++a) {\n        float ax = atoms[4*a + 0];\n        float ay = atoms[4*a + 1];\n        float az = atoms[4*a + 2];\n        float q  = atoms[4*a + 3];\n\n        float y  = gridspacing * (float)j;\n        float dy = y - ay;\n        float dz = z - az;\n        float dyz2 = dy*dy + dz*dz;\n\n        #pragma unroll\n        for (int c = 0; c < COARSEN_FACTOR; ++c) {\n            int i = base_i + c;\n            if (i >= 0 && i < (int)grid.x) {\n                float x  = gridspacing * (float)i;\n                float dx = x - ax;\n                float denom = sqrtf(dx*dx + dyz2);\n                energies[c] += q / fmaxf(denom, 1e-12f);\n            }\n        }\n    }\n\n    #pragma unroll\n    for (int c = 0; c < 
COARSEN_FACTOR; ++c) {\n        int i = base_i + c;\n        if (i >= 0 && i < (int)grid.x) {\n            size_t idx = (size_t)grid.x * grid.y * k + (size_t)grid.x * j + (size_t)i;\n            energygrid[idx] += energies[c];\n        }\n    }\n}\n```"}
{"type": "coding", "id": "ch18-energy-gather-coarsened-coalesced-single", "task_dir": "evaluation-tasks/ch18-energy-gather-coarsened-coalesced-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch18-energy-gather-coarsened-coalesced-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch18-energy-gather-coarsened-coalesced-single / student_kernel.cu\n//\n// TODO: Implement a GATHER kernel with THREAD COARSENING + COALESCED WRITES (cf. Fig. 18.10).\n//\n// One thread computes COARSEN_FACTOR output cells along +X on the same row j.\n// All threads in a warp write coalesced to a shared 2D buffer, then flush together.\n// No atomics: each output cell is owned by exactly one thread.\n//\n// CONSTANT MEMORY LAYOUT\n//   __constant__ float atoms[CHUNK_SIZE*4]; // AoS: x,y,z,q (4 floats per atom)\n//\n// KERNEL PARAMS\n//   energygrid     : [grid.x * grid.y * grid.z] output array\n//   grid           : logical grid dims (x,y,z)\n//   gridspacing    : world-space spacing\n//   z              : world z for the slice (assumed multiple of gridspacing)\n//   atoms_in_chunk : number of atoms currently in constant memory (<= CHUNK_SIZE)\n//   start_atom     : global offset of the first atom in the chunk (not needed here)\n//\n// COARSENING + COALESCING SHAPE\n//   base_i = blockIdx.x * (blockDim.x * COARSEN_FACTOR) + threadIdx.x * COARSEN_FACTOR\n//   j      = blockIdx.y * blockDim.y + threadIdx.y\n//   Each thread computes cells i = base_i + c, for c in [0, COARSEN_FACTOR).\n//   Shared memory buffer: [blockDim.y][blockDim.x * COARSEN_FACTOR]\n//   Threads write to shared buffer, then flush coalesced to global memory.\n//\n// REQUIREMENTS\n// - Use sqrtf; clamp denom with fmaxf(denom, 1e-12f) to avoid div-by-zero.\n// - Bounds check i and j.\n// - Use shared memory buffer for coalesced writes.\n// - Single write per output cell: energygrid[idx] += energy_c.\n// - Do not use atomics.\n//\n// HINTS\n// - Precompute dy and dz once per atom per row (reuse dy*dy + dz*dz).\n// - Use a small local array energies[COARSEN_FACTOR] to accumulate.\n// - Write energies to shared buffer, then flush with coalesced access pattern.\n\n#ifndef CHUNK_SIZE\n#define CHUNK_SIZE 256\n#endif\n#ifndef COARSEN_FACTOR\n#define COARSEN_FACTOR 8\n#endif\n\n__constant__ float atoms[CHUNK_SIZE * 4];\n\nextern \"C\" __global__\nvoid cenergyCoarsenCoalescedKernel(float* __restrict__ energygrid,\n                                   dim3 grid,\n                                   float gridspacing,\n                                   float z,\n                                   int atoms_in_chunk,\n                                   int /*start_atom_unused*/) {\n    // Shared memory buffer for coalesced writes\n    __shared__ float sbuf[2][128 * COARSEN_FACTOR];  // [blockDim.y][blockDim.x * COARSEN_FACTOR]\n\n    int base_i = blockIdx.x * (blockDim.x * COARSEN_FACTOR) + threadIdx.x * COARSEN_FACTOR;\n    int j      = blockIdx.y * blockDim.y + threadIdx.y;\n\n    if (j < 0 || j >= (int)grid.y) return;\n\n    int k = int(z / gridspacing);\n    if (k < 0 || k >= (int)grid.z) return;\n\n    float energies[COARSEN_FACTOR];\n    #pragma unroll\n    for (int c = 0; c < COARSEN_FACTOR; ++c) energies[c] = 0.0f;\n\n    for (int a = 0; a < atoms_in_chunk; ++a) {\n        float ax = atoms[4*a + 0];\n        float ay = atoms[4*a + 1];\n        float az = 
atoms[4*a + 2];\n        float q  = atoms[4*a + 3];\n\n        float y  = gridspacing * (float)j;\n        float dy = y - ay;\n        float dz = z - az;\n        float dyz2 = dy*dy + dz*dz;\n\n        #pragma unroll\n        for (int c = 0; c < COARSEN_FACTOR; ++c) {\n            int i = base_i + c;\n            if (i >= 0 && i < (int)grid.x) {\n                float x  = gridspacing * (float)i;\n                float dx = x - ax;\n                float denom = sqrtf(dx*dx + dyz2);\n                energies[c] += q / fmaxf(denom, 1e-12f);\n            }\n        }\n    }\n\n    // Write to shared memory buffer\n    #pragma unroll\n    for (int c = 0; c < COARSEN_FACTOR; ++c) {\n        int i = base_i + c;\n        if (i >= 0 && i < (int)grid.x) {\n            sbuf[threadIdx.y][threadIdx.x * COARSEN_FACTOR + c] = energies[c];\n        } else {\n            sbuf[threadIdx.y][threadIdx.x * COARSEN_FACTOR + c] = 0.0f;\n        }\n    }\n\n    __syncthreads();\n\n    // Coalesced flush to global memory\n    int tid = threadIdx.y * blockDim.x + threadIdx.x;\n    int total_threads = blockDim.x * blockDim.y;\n    int elements_per_block = blockDim.x * blockDim.y * COARSEN_FACTOR;\n\n    for (int idx = tid; idx < elements_per_block; idx += total_threads) {\n        int local_y = idx / (blockDim.x * COARSEN_FACTOR);\n        int local_x = idx % (blockDim.x * COARSEN_FACTOR);\n\n        int global_j = blockIdx.y * blockDim.y + local_y;\n        int global_i = blockIdx.x * (blockDim.x * COARSEN_FACTOR) + local_x;\n\n        if (global_i >= 0 && global_i < (int)grid.x && global_j >= 0 && global_j < (int)grid.y) {\n            size_t global_idx = (size_t)grid.x * grid.y * k + (size_t)grid.x * global_j + (size_t)global_i;\n            energygrid[global_idx] += sbuf[local_y][local_x];\n        }\n    }\n}\n```"}
{"type": "coding", "id": "ch20-stencil-25pt-single-gpu-single", "task_dir": "evaluation-tasks/ch20-stencil-25pt-single-gpu-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch20-stencil-25pt-single-gpu-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n#include <cuda_runtime.h>\n\n// TODO: Implement a single-GPU axis-aligned 25-point stencil with radius R=4.\n// Contract:\n//  - Input/Output are dense 3D grids (dimx*dimy*dimz), row-major:\n//      idx(i,j,k) = (k*dimy + j)*dimx + i\n//  - For interior cells (i,j,k ∈ [4 .. dim-1-4]) compute:\n//      out = w0*center + Σ_{d=1..4} w[d] * (±d along x + ±d along y + ±d along z)\n//    with weights: w0=0.5, w1=0.10, w2=0.05, w3=0.025, w4=0.0125\n//  - Boundary cells (within 4 of any face) must be copy-through: out=in\n//  - Reasonable 3D launch config and synchronization\n//\n// Suggested steps:\n//  1) write idx3 helper\n//  2) in-kernel boundary test => copy-through\n//  3) accumulate using a small unrolled loop d=1..4\n//  4) host wrapper launches kernel\n\nstatic inline __device__ size_t idx3(int i,int j,int k,int dx,int dy){\n    return (size_t(k)*dy + j)*dx + i;\n}\n\n__global__ void stencil25_kernel(const float* __restrict__ in,\n                                 float* __restrict__ out,\n                                 int dimx, int dimy, int dimz)\n{\n    // TODO\n}\n\nextern \"C\" void stencil25_single_gpu(const float* d_in, float* d_out,\n                                     int dimx, int dimy, int dimz)\n{\n    // TODO: choose block/grid and launch kernel, then cudaDeviceSynchronize()\n}\n```"}
{"type": "coding", "id": "ch20-stencil-25pt-slab-stage1-boundary", "task_dir": "evaluation-tasks/ch20-stencil-25pt-slab-stage1-boundary", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch20-stencil-25pt-slab-stage1-boundary\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n#include <cuda_runtime.h>\n\n// TODO: Implement Stage-1 boundary update for axis-aligned 25-point stencil (R=4)\n// Local z layout has halos: total_z = dimz + 8\n// Owned region z ∈ [4 .. 4+dimz-1]\n// Stage-1 planes: [4..7] and [4+dimz-4 .. 4+dimz-1]\n// Within Stage-1 planes:\n//   - For i/j interior (>=4 and <dim-4) compute stencil\n//   - For i/j near faces, copy-through\n// Do not write interior planes or halos.\n\nstatic inline __device__ size_t idx3(int i,int j,int k,int dx,int dy){\n    return (size_t(k)*dy + j)*dx + i;\n}\n\n__global__ void stencil25_stage1_kernel(const float* __restrict__ in,\n                                        float* __restrict__ out,\n                                        int dimx,int dimy,int dimz)\n{\n    // TODO\n}\n\nextern \"C\" void stencil25_stage1_boundary(const float* d_in, float* d_out,\n                                          int dimx,int dimy,int dimz)\n{\n    // TODO: launch 3D grid over local z ∈ [0 .. dimz+7]\n}\n```"}
{"type": "coding", "id": "ch20-mpi-halo-pack-unpack", "task_dir": "evaluation-tasks/ch20-mpi-halo-pack-unpack", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch20-mpi-halo-pack-unpack\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n#include <cuda_runtime.h>\n\n// TODOs:\n// Implement two host wrappers that launch simple 2D grids:\n//\n// 1) halo_pack_boundaries(d_grid, dimx,dimy,dimz, d_left_send, d_right_send)\n//    - Read 4 owned boundary planes and pack into left/right buffers.\n//      Owned z range is [4 .. 4+dimz-1]. Pack planes:\n//         left : k = 4 + p,           p ∈ [0..3]\n//         right: k = (4+dimz-4) + p,  p ∈ [0..3]\n//    - Packed layout is plane-major then row-major:\n//         pack_idx(p,j,i) = (p*dimy + j)*dimx + i\n//\n// 2) halo_unpack_to_halos(d_grid, dimx,dimy,dimz, d_left_recv, d_right_recv)\n//    - Write left_recv to left halo k = 0..3  (k = 0 + p)\n//    - Write right_recv to right halo k = dimz+4 .. dimz+7 (k = dimz+4 + p)\n//    - Same pack_idx layout for sources.\n//\n// Keep the rest of the grid untouched.\n\nstatic inline __device__ size_t idx3(int i,int j,int k,int dx,int dy){\n    return (size_t(k)*dy + j)*dx + i;\n}\nstatic inline __device__ size_t pack_idx(int p,int j,int i,int dx,int dy){\n    return (size_t(p)*dy + j)*dx + i;\n}\n\n__global__ void k_pack_student(const float* __restrict__ grid,\n                               int dimx,int dimy,int dimz,\n                               float* __restrict__ left_send,\n                               float* __restrict__ right_send)\n{\n    // TODO: implement (mirror reference description above)\n}\n\n__global__ void k_unpack_student(float* __restrict__ grid,\n                                 int dimx,int dimy,int dimz,\n                                 const float* __restrict__ left_recv,\n                                 const float* __restrict__ right_recv)\n{\n    // TODO: implement (mirror reference description above)\n}\n\nextern \"C\" void halo_pack_boundaries(const float* d_grid,\n                                     int dimx,int dimy,int dimz,\n                                     float* d_left_send,\n                                     float* d_right_send)\n{\n    // TODO: choose block(16,16) grid(ceil) and launch k_pack_student, then sync\n}\n\nextern \"C\" void halo_unpack_to_halos(float* d_grid,\n                                     int dimx,int dimy,int dimz,\n                                     const float* d_left_recv,\n                                     const float* d_right_recv)\n{\n    // TODO: choose block(16,16) grid(ceil) and launch k_unpack_student, then sync\n}\n```"}
{"type": "coding", "id": "ch20-stencil-25pt-slab-stage2-interior", "task_dir": "evaluation-tasks/ch20-stencil-25pt-slab-stage2-interior", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch20-stencil-25pt-slab-stage2-interior\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n#include <cuda_runtime.h>\n\n// TODO: Implement Stage-2 interior update for the 25-point stencil (R=4).\n// Local z extent = dimz + 8. Owned z = [4 .. 4+dimz-1].\n// Stage-2 interior planes: k ∈ [8 .. (4+dimz-1)-4].\n// For i/j edges (i<4 || i>=dimx-4 || j<4 || j>=dimy-4) copy-through.\n// Do not touch halos or Stage-1 planes.\n\nstatic inline __device__ size_t idx3(int i,int j,int k,int dx,int dy){\n    return (size_t(k)*dy + j)*dx + i;\n}\n\n__global__ void stencil25_stage2_kernel(const float* __restrict__ in,\n                                        float* __restrict__ out,\n                                        int dimx,int dimy,int dimz)\n{\n    // TODO\n}\n\nextern \"C\" void stencil25_stage2_interior(const float* d_in, float* d_out,\n                                          int dimx,int dimy,int dimz)\n{\n    // TODO: launch 3D grid and synchronize\n}\n```"}
{"type": "coding", "id": "ch20-mpi-stencil-pipeline-naive", "task_dir": "evaluation-tasks/ch20-mpi-stencil-pipeline-naive", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch20-mpi-stencil-pipeline-naive\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n#include <cuda_runtime.h>\n#include <vector>\n#include <cassert>\n#include <cstdio>\n\n// ====== Utilities ======\nstatic inline __host__ __device__\nsize_t idx3(int i,int j,int k,int dx,int dy){ return (size_t(k)*dy + j)*dx + i; }\nstatic inline void ck(cudaError_t e,const char* m){ if(e!=cudaSuccess){fprintf(stderr,\"CUDA %s: %s\\n\",m,cudaGetErrorString(e)); std::exit(2);} }\n\n// ====== Provided stencil kernels & pack/unpack launchers ======\nextern \"C\" void stencil25_stage1_boundary(const float* d_in, float* d_out,\n                                          int dimx,int dimy,int dz_local,\n                                          int z_global_beg, int dimz_total);\nextern \"C\" void stencil25_stage2_interior(const float* d_in, float* d_out,\n                                          int dimx,int dimy,int dz_local);\nextern \"C\" void halo_pack_boundaries(const float* d_slab_out,\n                                     int dimx,int dimy,int dz_local,\n                                     float* d_left_send, float* d_right_send);\nextern \"C\" void halo_unpack_to_halos(float* d_slab_out,\n                                     int dimx,int dimy,int dz_local,\n                                     const float* d_left_recv, const float* d_right_recv);\n\n// ====== Small helpers to scatter/gather between full & slab memory ======\n__global__ void k_scatter_from_full(const float* __restrict__ d_in_full,\n                                    float* __restrict__ d_slab_in,\n                                    int dimx,int dimy,int z0,int dz)\n{\n    int i=blockIdx.x*blockDim.x+threadIdx.x;\n    int j=blockIdx.y*blockDim.y+threadIdx.y;\n    int t=blockIdx.z*blockDim.z+threadIdx.z; // local owned z [0..dz-1]\n    if(i>=dimx||j>=dimy||t>=dz) return;\n    int k_local = 4 + t;\n    int k_full  = z0 + t;\n    d_slab_in[idx3(i,j,k_local,dimx,dimy)] =\n        d_in_full[idx3(i,j,k_full,dimx,dimy)];\n}\n\n__global__ void k_gather_to_full(const float* __restrict__ d_slab_out,\n                                 float* __restrict__ d_out_full,\n                                 int dimx,int dimy,int z0,int dz)\n{\n    int i=blockIdx.x*blockDim.x+threadIdx.x;\n    int j=blockIdx.y*blockDim.y+threadIdx.y;\n    int t=blockIdx.z*blockDim.z+threadIdx.z; // local owned z [0..dz-1]\n    if(i>=dimx||j>=dimy||t>=dz) return;\n    int k_local = 4 + t;\n    int k_full  = z0 + t;\n    d_out_full[idx3(i,j,k_full,dimx,dimy)] =\n        d_slab_out[idx3(i,j,k_local,dimx,dimy)];\n}\n\n// ====== YOUR TASK ======\nextern \"C\" void mpi_stencil_pipeline_naive(const float* d_in_full,\n                                           float* d_out_full,\n                                           int dimx,int dimy,int dimz_total,\n                                           int procs)\n{\n    // TODO: Implement the naive pipeline across `procs` slabs:\n    // 1) Partition: dz = dimz_total / procs (assert divisibility).\n    // 2) For each slab r:\n    //     - cudaMalloc local d_in[r], d_out[r] (size: dimx*dimy*(dz+8))\n    //     - cudaMalloc left/right send & recv buffers (size: dimx*dimy*4)\n    //     - 
Scatter from d_in_full -> d_in[r] (owned planes k_local=4..)\n    //       using k_scatter_from_full<<<...>>>\n    //     - Seed d_out[r] = d_in[r]  (so copy-through faces remain)\n    // 3) Stage-1: stencil25_stage1_boundary(d_in[r], d_out[r], ..., z0=r*dz, dimz_total)\n    // 4) Pack: halo_pack_boundaries(d_out[r], ..., d_Ls[r], d_Rs[r])\n    // 5) Exchange (simulate MPI): for each r:\n    //       if left exists:  cudaMemcpy(d_Rr[left],  d_Ls[r], bytes, D2D)\n    //       if right exists: cudaMemcpy(d_Lr[right], d_Rs[r], bytes, D2D)\n    // 6) Unpack halos into OUT: halo_unpack_to_halos(d_out[r], ..., d_Lr[r], d_Rr[r])\n    // 7) Stage-2: stencil25_stage2_interior(d_in[r], d_out[r], ...)\n    // 8) Gather owned planes back to d_out_full via k_gather_to_full<<<...>>>\n    // 9) Free all slab buffers.\n    (void)d_in_full; (void)d_out_full; (void)dimx; (void)dimy; (void)dimz_total; (void)procs;\n}\n```"}
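Editor's note: step 5 is the only step without provided helpers, so a sketch of the simulated exchange follows. The buffer-vector names (d_Ls/d_Rs for send, d_Lr/d_Rr for receive) follow the TODO's naming and are assumed to have been allocated in step 2.

```cuda
#include <cuda_runtime.h>
#include <vector>

// Sketch of step 5 only: slab r's packed left boundary goes to the left
// neighbor's right halo, and vice versa, via device-to-device copies.
static void exchange_halos_sketch(const std::vector<float*>& d_Ls,
                                  const std::vector<float*>& d_Rs,
                                  const std::vector<float*>& d_Lr,
                                  const std::vector<float*>& d_Rr,
                                  int dimx, int dimy, int procs)
{
    size_t bytes = (size_t)4 * dimx * dimy * sizeof(float);   // 4 packed planes per side
    for (int r = 0; r < procs; ++r) {
        if (r > 0)          // my left boundary -> left neighbor's right halo
            cudaMemcpy(d_Rr[r-1], d_Ls[r], bytes, cudaMemcpyDeviceToDevice);
        if (r + 1 < procs)  // my right boundary -> right neighbor's left halo
            cudaMemcpy(d_Lr[r+1], d_Rs[r], bytes, cudaMemcpyDeviceToDevice);
    }
}
```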
{"type": "coding", "id": "ch20-mpi-stencil-pipeline-cudaaware", "task_dir": "evaluation-tasks/ch20-mpi-stencil-pipeline-cudaaware", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch20-mpi-stencil-pipeline-cudaaware\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n#include <cuda_runtime.h>\n#include <vector>\n#include <cassert>\n#include <cstdio>\n\nstatic inline __host__ __device__\nsize_t idx3(int i,int j,int k,int dx,int dy){ return (size_t(k)*dy + j)*dx + i; }\nstatic void ck(cudaError_t e,const char* m){ if(e!=cudaSuccess){fprintf(stderr,\"CUDA %s: %s\\n\",m,cudaGetErrorString(e)); std::exit(2);} }\n\n// Stage 1/2, pack/unpack launchers (provided)\nextern \"C\" void stencil25_stage1_boundary(const float* d_in, float* d_out,\n                                          int dimx,int dimy,int dz_local,\n                                          int z_global_beg, int dimz_total);\nextern \"C\" void stencil25_stage2_interior(const float* d_in, float* d_out,\n                                          int dimx,int dimy,int dz_local);\nextern \"C\" void halo_pack_boundaries(const float* d_slab_out,\n                                     int dimx,int dimy,int dz_local,\n                                     float* d_left_send, float* d_right_send);\nextern \"C\" void halo_unpack_to_halos(float* d_slab_out,\n                                     int dimx,int dimy,int dz_local,\n                                     const float* d_left_recv, const float* d_right_recv);\n\n// CUDA-aware sendrecv wrapper (device->device)\nextern \"C\" void mpi_cudaaware_sendrecv_device(const float* d_sendbuf, int sendcount,\n                                              float* d_recvbuf, int recvcount);\n\n// scatter/gather kernels (provided)\n__global__ void k_scatter_from_full(const float* __restrict__ d_in_full,\n                                    float* __restrict__ d_slab_in,\n                                    int dimx,int dimy,int z0,int dz)\n{\n    int i=blockIdx.x*blockDim.x+threadIdx.x;\n    int j=blockIdx.y*blockDim.y+threadIdx.y;\n    int t=blockIdx.z*blockDim.z+threadIdx.z;\n    if(i>=dimx||j>=dimy||t>=dz) return;\n    int k_local = 4 + t;\n    int k_full  = z0 + t;\n    d_slab_in[idx3(i,j,k_local,dimx,dimy)] =\n        d_in_full[idx3(i,j,k_full,dimx,dimy)];\n}\n__global__ void k_gather_to_full(const float* __restrict__ d_slab_out,\n                                 float* __restrict__ d_out_full,\n                                 int dimx,int dimy,int z0,int dz)\n{\n    int i=blockIdx.x*blockDim.x+threadIdx.x;\n    int j=blockIdx.y*blockDim.y+threadIdx.y;\n    int t=blockIdx.z*blockDim.z+threadIdx.z;\n    if(i>=dimx||j>=dimy||t>=dz) return;\n    int k_local = 4 + t;\n    int k_full  = z0 + t;\n    d_out_full[idx3(i,j,k_full,dimx,dimy)] =\n        d_slab_out[idx3(i,j,k_local,dimx,dimy)];\n}\n\nextern \"C\" void mpi_stencil_pipeline_cudaaware(const float* d_in_full,\n                                               float* d_out_full,\n                                               int dimx,int dimy,int dimz_total,\n                                               int procs)\n{\n    // TODO:\n    // 1) Assert procs>=1 and dimz_total % procs == 0; compute dz.\n    // 2) Allocate per-slab: d_in[r], d_out[r] (size dimx*dimy*(dz+8));\n    //    and d_Ls[r], d_Rs[r], d_Lr[r], d_Rr[r] (each 4*dimx*dimy).\n    // 3) Scatter owned planes 
from d_in_full to each d_in[r] with k_scatter_from_full.\n    //    Seed d_out[r] = d_in[r] (cudaMemcpyDeviceToDevice).\n    // 4) Stage-1 boundary on OUT using stencil25_stage1_boundary(..., z0=r*dz, dimz_total).\n    // 5) Pack boundary planes from OUT using halo_pack_boundaries(..., d_Ls[r], d_Rs[r]).\n    // 6) CUDA-aware \"sendrecv\":\n    //      if left neighbor exists:  mpi_cudaaware_sendrecv_device(d_Ls[r], Npack, d_Rr[left],  Npack)\n    //      if right neighbor exists: mpi_cudaaware_sendrecv_device(d_Rs[r], Npack, d_Lr[right], Npack)\n    // 7) Unpack into OUT halos: halo_unpack_to_halos(d_out[r], ..., d_Lr[r], d_Rr[r]).\n    // 8) Stage-2 interior on OUT (input is IN): stencil25_stage2_interior(d_in[r], d_out[r], ...).\n    // 9) Gather owned planes back to d_out_full via k_gather_to_full.\n    // 10) Free all allocations.\n    (void)d_in_full; (void)d_out_full; (void)dimx; (void)dimy; (void)dimz_total; (void)procs;\n}\n```"}
{"type": "coding", "id": "ch21-bezier-dp-parent-child-single", "task_dir": "evaluation-tasks/ch21-bezier-dp-parent-child-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch21-bezier-dp-parent-child-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch21-bezier-dp-parent-child-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <stdint.h>\n\n// Use CUDA builtin float2\n#include <vector_types.h>\n\nstruct BezierLine {\n  float2 CP[3];        // P0, P1, P2\n  float2* vertexPos;   // device buffer (allocated in parent via device malloc)\n  int     nVertices;   // chosen per line in parent\n};\n\n// --- You implement these -----------------------------------------------------\n\n// Geometric curvature proxy: distance from P1 to line P0-P2 (normalized by |P2-P0|)\n// Return non-negative curvature (0 for degenerate segment).\n__device__ float curvature_of(const float2 P0, const float2 P1, const float2 P2) {\n  // TODO: implement robust point-to-segment distance proxy.\n  // Hints:\n  //   v = P2 - P0\n  //   w = P1 - P0\n  //   area2 = |v.x*w.y - v.y*w.x|     (2x triangle area)\n  //   base  = sqrt(v.x*v.x + v.y*v.y)\n  //   curvature ~ area2 / max(base, 1e-8)\n  return 0.0f; // TODO\n}\n\n// Child kernel: compute tessellated positions for one line lidx.\n__global__ void computeBezierLine_child(int lidx, BezierLine* bLines, int nTess) {\n  // TODO:\n  //  - idx = blockIdx.x*blockDim.x + threadIdx.x\n  //  - if idx >= nTess: return\n  //  - u = idx / (nTess-1)   (float)\n  //  - B0=(1-u)^2, B1=2u(1-u), B2=u^2\n  //  - position = B0*P0 + B1*P1 + B2*P2\n  //  - write to bLines[lidx].vertexPos[idx]\n}\n\n// Parent kernel: choose tessellation density, allocate vertex buffers, launch child.\n__global__ void computeBezierLines_parent(BezierLine* bLines, int nLines, int maxTess) {\n  // TODO:\n  //  - lidx = blockIdx.x*blockDim.x + threadIdx.x; if (lidx>=nLines) return\n  //  - compute curvature_of(...)\n  //  - nVerts = clamp( int(curv*16.f)+4, 4, maxTess );\n  //  - bLines[lidx].nVertices = nVerts;\n  //  - bLines[lidx].vertexPos = (float2*)malloc(nVerts * sizeof(float2));\n  //      * if malloc returns nullptr, set nVertices=0 and return.\n  //  - launch child: <<< (nVerts+31)/32, 32 >>> computeBezierLine_child(lidx, bLines, nVerts);\n}\n```"}
{"type": "coding", "id": "ch21-bezier-dp-free-child-buffers", "task_dir": "evaluation-tasks/ch21-bezier-dp-free-child-buffers", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch21-bezier-dp-free-child-buffers\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch21-bezier-dp-free-child-buffers / student_kernel.cu\n#include <cuda_runtime.h>\n#include <stdint.h>\n\n#include <vector_types.h>\n\nstruct BezierLine {\n  float2 CP[3];\n  float2* vertexPos; // device-heap pointer\n  int     nVertices;\n};\n\n// TODO: Implement idempotent free:\n//  - lidx = blockIdx.x*blockDim.x + threadIdx.x; if (lidx>=nLines) return;\n//  - if (bLines[lidx].vertexPos != nullptr) { free(ptr); bLines[lidx].vertexPos = nullptr; }\n//  - (optional) bLines[lidx].nVertices = 0;\n__global__ void freeVertexMem(BezierLine* bLines, int nLines) {\n  // TODO\n}\n```"}
{"type": "coding", "id": "ch21-quadtree-dp-build-single", "task_dir": "evaluation-tasks/ch21-quadtree-dp-build-single", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch21-quadtree-dp-build-single\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch21-quadtree-dp-build-single / student_kernel.cu\n#include <cuda_runtime.h>\n#include <stdint.h>\n\n// ----------------------------- Data types -----------------------------\nstruct Bounds { float minx, miny, maxx, maxy; };\n__device__ __host__ inline Bounds make_bounds(float a,float b,float c,float d){ return Bounds{a,b,c,d}; }\n\nstruct QuadWork {\n  const float* x;\n  const float* y;\n  const int*   idx;         // indices of points for this segment\n  int          begin;       // segment begin (relative to idx)\n  int          count;       // segment length\n  Bounds       b;\n  int          depth;\n  int          max_depth;\n  int          min_points;\n  // outputs/globals\n  int*         perm;        // output permutation (length n)\n  int*         leafOffset;  // length >= n\n  int*         leafCount;   // length >= n\n  int*         leafCounter; // single int in device memory\n  int*         permCursor;  // single int in device memory\n};\n\n// Prototypes\n__global__ void quadtree_build_parent(const float* x, const float* y, int n,\n                                      Bounds root, int max_depth, int min_points,\n                                      int* perm, int* leafOffset, int* leafCount,\n                                      int* leafCounter, int* permCursor,\n                                      const int* idx_root);\n\n__global__ void quadtree_node(QuadWork w);\n\n// ----------------------------- Helpers -----------------------------\n__device__ __host__ inline bool in_NW(float px, float py, const Bounds& b){\n  float mx=0.5f*(b.minx+b.maxx), my=0.5f*(b.miny+b.maxy);\n  return (px < mx) && (py >= my);\n}\n__device__ __host__ inline bool in_NE(float px, float py, const Bounds& b){\n  float mx=0.5f*(b.minx+b.maxx), my=0.5f*(b.miny+b.maxy);\n  return (px >= mx) && (py >= my);\n}\n__device__ __host__ inline bool in_SW(float px, float py, const Bounds& b){\n  float mx=0.5f*(b.minx+b.maxx), my=0.5f*(b.miny+b.maxy);\n  return (px < mx) && (py < my);\n}\n__device__ __host__ inline bool in_SE(float px, float py, const Bounds& b){\n  float mx=0.5f*(b.minx+b.maxx), my=0.5f*(b.miny+b.maxy);\n  return (px >= mx) && (py < my);\n}\n\n__device__ __host__ inline Bounds child_bounds_NW(const Bounds& b){\n  float mx=0.5f*(b.minx+b.maxx), my=0.5f*(b.miny+b.maxy);\n  return make_bounds(b.minx, my, mx, b.maxy);\n}\n__device__ __host__ inline Bounds child_bounds_NE(const Bounds& b){\n  float mx=0.5f*(b.minx+b.maxx), my=0.5f*(b.miny+b.maxy);\n  return make_bounds(mx, my, b.maxx, b.maxy);\n}\n__device__ __host__ inline Bounds child_bounds_SW(const Bounds& b){\n  float mx=0.5f*(b.minx+b.maxx), my=0.5f*(b.miny+b.maxy);\n  return make_bounds(b.minx, b.miny, mx, my);\n}\n__device__ __host__ inline Bounds child_bounds_SE(const Bounds& b){\n  float mx=0.5f*(b.minx+b.maxx), my=0.5f*(b.miny+b.maxy);\n  return make_bounds(mx, b.miny, b.maxx, my);\n}\n\n// ----------------------------- Kernels -----------------------------\n__global__ void quadtree_build_parent(const float* x, const float* y, int n,\n                                      Bounds root, int max_depth, int 
min_points,\n                                      int* perm, int* leafOffset, int* leafCount,\n                                      int* leafCounter, int* permCursor,\n                                      const int* idx_root)\n{\n  // TODO: Create QuadWork struct and launch root node\n  // if (blockIdx.x != 0 || threadIdx.x != 0) return;\n  // QuadWork w;\n  // w.x = x; w.y = y; w.idx = idx_root;\n  // w.begin = 0; w.count = n;\n  // w.b = root;\n  // w.depth = 0; w.max_depth = max_depth; w.min_points = min_points;\n  // w.perm = perm; w.leafOffset = leafOffset; w.leafCount = leafCount;\n  // w.leafCounter = leafCounter; w.permCursor = permCursor;\n  // quadtree_node<<<1,1>>>(w);\n}\n\n__global__ void quadtree_node(QuadWork w)\n{\n  // TODO: Implement node processing logic\n  // if (threadIdx.x != 0) return; // single-thread control path per node\n  //\n  // const int begin = w.begin, count = w.count;\n  // const Bounds b = w.b;\n  //\n  // // Leaf?\n  // if (w.depth >= w.max_depth || count <= w.min_points) {\n  //   int leafId   = atomicAdd(w.leafCounter, 1);\n  //   int outBegin = atomicAdd(w.permCursor, count);\n  //   w.leafOffset[leafId] = outBegin;\n  //   w.leafCount[leafId]  = count;\n  //   // write perm in original order\n  //   for (int i=0;i<count;i++){\n  //     int pidx = w.idx[begin + i];\n  //     w.perm[outBegin + i] = pidx;\n  //   }\n  //   return;\n  // }\n  //\n  // // Two-pass partition (stable) into a local device buffer\n  // // 1) Count\n  // int cNW=0,cNE=0,cSW=0,cSE=0;\n  // for (int i=0;i<count;i++){\n  //   int id = w.idx[begin+i];\n  //   float px = w.x[id], py = w.y[id];\n  //   if      (in_NW(px,py,b)) cNW++;\n  //   else if (in_NE(px,py,b)) cNE++;\n  //   else if (in_SW(px,py,b)) cSW++;\n  //   else                     cSE++;\n  // }\n  // int sNW=0, sNE=cNW, sSW=cNW+cNE, sSE=cNW+cNE+cSW;\n  //\n  // // 2) Allocate and scatter indices into quadrant order [NW | NE | SW | SE]\n  // int* localIdx = (int*)malloc(sizeof(int)*count);\n  // if (!localIdx){\n  //   // Fallback: emit as a leaf to preserve correctness\n  //   int leafId   = atomicAdd(w.leafCounter, 1);\n  //   int outBegin = atomicAdd(w.permCursor, count);\n  //   w.leafOffset[leafId] = outBegin;\n  //   w.leafCount[leafId]  = count;\n  //   for (int i=0;i<count;i++){\n  //     w.perm[outBegin + i] = w.idx[begin + i];\n  //   }\n  //   return;\n  // }\n  // int pNW=0,pNE=0,pSW=0,pSE=0;\n  // for (int i=0;i<count;i++){\n  //   int id = w.idx[begin+i];\n  //   float px = w.x[id], py = w.y[id];\n  //   if      (in_NW(px,py,b)) localIdx[sNW + (pNW++)] = id;\n  //   else if (in_NE(px,py,b)) localIdx[sNE + (pNE++)] = id;\n  //   else if (in_SW(px,py,b)) localIdx[sSW + (pSW++)] = id;\n  //   else                     localIdx[sSE + (pSE++)] = id;\n  // }\n  //\n  // // 3) Launch children in fixed order: NW, NE, SW, SE (only non-empty)\n  // int off=0;\n  // if (cNW>0){\n  //   QuadWork c=w;\n  //   c.idx=localIdx; c.begin=0; c.count=cNW; c.b=child_bounds_NW(b); c.depth=w.depth+1;\n  //   quadtree_node<<<1,1>>>(c);\n  // }\n  // off += cNW;\n  // if (cNE>0){\n  //   QuadWork c=w;\n  //   c.idx=localIdx; c.begin=off; c.count=cNE; c.b=child_bounds_NE(b); c.depth=w.depth+1;\n  //   quadtree_node<<<1,1>>>(c);\n  // }\n  // off += cNE;\n  // if (cSW>0){\n  //   QuadWork c=w;\n  //   c.idx=localIdx; c.begin=off; c.count=cSW; c.b=child_bounds_SW(b); c.depth=w.depth+1;\n  //   quadtree_node<<<1,1>>>(c);\n  // }\n  // off += cSW;\n  // if (cSE>0){\n  //   QuadWork c=w;\n  //   c.idx=localIdx; c.begin=off; 
c.count=cSE; c.b=child_bounds_SE(b); c.depth=w.depth+1;\n  //   quadtree_node<<<1,1>>>(c);\n  // }\n  //\n  // // 4) Wait for children to finish and free local buffer\n  // cudaDeviceSynchronize();\n  // free(localIdx);\n}\n```"}
{"type": "coding", "id": "ch21-quadtree-dp-pack-coalesced", "task_dir": "evaluation-tasks/ch21-quadtree-dp-pack-coalesced", "student_file": "student_kernel.cu", "student_targets": ["test_student"], "reference_targets": ["test_reference"], "timeout_sec": 180, "student_exec": "./test_student", "reference_exec": "./test_reference", "question": "Task: ch21-quadtree-dp-pack-coalesced\nTarget file: student_kernel.cu\n\nSkeleton:\n```cuda\n// ch21-quadtree-dp-pack-coalesced / student_kernel.cu\n#include <cuda_runtime.h>\n#include <stdint.h>\n\n__device__ __host__ inline bool in_NW(float px,float py,float minx,float miny,float maxx,float maxy){\n  float mx=0.5f*(minx+maxx), my=0.5f*(miny+maxy);\n  return (px < mx) && (py >= my);\n}\n__device__ __host__ inline bool in_NE(float px,float py,float minx,float miny,float maxx,float maxy){\n  float mx=0.5f*(minx+maxx), my=0.5f*(miny+maxy);\n  return (px >= mx) && (py >= my);\n}\n__device__ __host__ inline bool in_SW(float px,float py,float minx,float miny,float maxx,float maxy){\n  float mx=0.5f*(minx+maxx), my=0.5f*(miny+maxy);\n  return (px < mx) && (py <  my);\n}\n__device__ __host__ inline bool in_SE(float px,float py,float minx,float miny,float maxx,float maxy){\n  float mx=0.5f*(minx+maxx), my=0.5f*(miny+maxy);\n  return (px >= mx) && (py <  my);\n}\n\n// Pack one segment into NW,NE,SW,SE regions of idx_out with stable order.\n// Single block; segCount kept modest in tests.\nextern \"C\" __global__\nvoid pack_quadrants_singleblock(const float* __restrict__ x,\n                                const float* __restrict__ y,\n                                const int*   __restrict__ idx_in,\n                                int*         __restrict__ idx_out,\n                                int segBegin, int segCount,\n                                float minx, float miny, float maxx, float maxy)\n{\n  // TODO: Implement quadrant packing kernel\n  // extern __shared__ int sh[];\n  // int* counts  = sh;        // 4\n  // int* offsets = sh + 4;    // 4\n  // int* cursors = sh + 8;    // 4\n  //\n  // if (threadIdx.x < 4){ counts[threadIdx.x]=0; }\n  // __syncthreads();\n  //\n  // // 1) Count per quadrant\n  // for (int t=threadIdx.x; t<segCount; t+=blockDim.x){\n  //   int id = idx_in[segBegin + t];\n  //   float px = x[id], py = y[id];\n  //   int q = in_NW(px,py,minx,miny,maxx,maxy) ? 0 :\n  //           in_NE(px,py,minx,miny,maxx,maxy) ? 1 :\n  //           in_SW(px,py,minx,miny,maxx,maxy) ? 2 : 3;\n  //   atomicAdd(&counts[q], 1);\n  // }\n  // __syncthreads();\n  //\n  // // 2) Exclusive scan for offsets [NW, NE, SW, SE]\n  // if (threadIdx.x==0){\n  //   offsets[0]=0;\n  //   offsets[1]=counts[0];\n  //   offsets[2]=counts[0]+counts[1];\n  //   offsets[3]=counts[0]+counts[1]+counts[2];\n  //   cursors[0]=cursors[1]=cursors[2]=cursors[3]=0;\n  // }\n  // __syncthreads();\n  //\n  // // 3) Stable scatter: process sequentially to preserve order within quadrants\n  // for (int t=0; t<segCount; t++){\n  //   if (threadIdx.x == 0) {\n  //     int id = idx_in[segBegin + t];\n  //     float px = x[id], py = y[id];\n  //     int q = in_NW(px,py,minx,miny,maxx,maxy) ? 0 :\n  //             in_NE(px,py,minx,miny,maxx,maxy) ? 1 :\n  //             in_SW(px,py,minx,miny,maxx,maxy) ? 2 : 3;\n  //     int pos_in_q = cursors[q]++;\n  //     int out = segBegin + offsets[q] + pos_in_q;\n  //     idx_out[out] = id;\n  //   }\n  // }\n}\n```"}