/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <iostream>
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/device/gemm_array.h"
#include "cutlass/gemm/device/gemm_batched.h"
#pragma warning( disable : 4503)
/*
This example demonstrates how to use CUTLASS to compute a batched strided GEMM in two different ways:
1. By specifying pointers to the first matrices of the batch and the stride between consecutive
matrices of the batch (this is called a strided batched GEMM).
2. By copying pointers to all matrices of the batch to device memory (this is called an array GEMM).
In this example, both A and B are non-transposed, column-major matrices, and the computation is
batched_C = batched_A x batched_B
As an example, matrix C can be seen as
-----------------------------------------------------------
(0,0,0) | (0,0,1) | (0,0,2) | (1,0,0) | (1,0,1) | (1,0,2) |
-----------------------------------------------------------
(0,1,0) | (0,1,1) | (0,1,2) | (1,1,0) | (1,1,1) | (1,1,2) |
-----------------------------------------------------------
(0,2,0) | (0,2,1) | (0,2,2) | (1,2,0) | (1,2,1) | (1,2,2) |
-----------------------------------------------------------
(0,3,0) | (0,3,1) | (0,3,2) | (1,3,0) | (1,3,1) | (1,3,2) |
-----------------------------------------------------------
(0,4,0) | (0,4,1) | (0,4,2) | (1,4,0) | (1,4,1) | (1,4,2) |
-----------------------------------------------------------
(0,5,0) | (0,5,1) | (0,5,2) | (1,5,0) | (1,5,1) | (1,5,2) |
-----------------------------------------------------------
batch 0 | batch 1
where each element is denoted (batch_idx, row_idx, column_idx)
In this example, the batch size is 2, M is 6 and N is 3
The stride (batch_stride_C) between the first elements of two consecutive batches is ldc * n
matrix A can be seen as
---------------------------------------
(0,0,0) | (0,0,1) | (1,0,0) | (1,0,1) |
---------------------------------------
(0,1,0) | (0,1,1) | (1,1,0) | (1,1,1) |
---------------------------------------
(0,2,0) | (0,2,1) | (1,2,0) | (1,2,1) |
---------------------------------------
(0,3,0) | (0,3,1) | (1,3,0) | (1,3,1) |
---------------------------------------
(0,4,0) | (0,4,1) | (1,4,0) | (1,4,1) |
---------------------------------------
(0,5,0) | (0,5,1) | (1,5,0) | (1,5,1) |
---------------------------------------
batch 0 | batch 1
, where the batch size is 2, M is 6 and K is 2
The stride (batch_stride_A) between the first elements of two consecutive batches is lda * k
matrix B can be seen as
-----------------------------
(0,0,0) | (0,0,1) | (0,0,2) |
----------------------------- batch 0
(0,1,0) | (0,1,1) | (0,1,2) |
-------------------------------------
(1,0,0) | (1,0,1) | (1,0,2) |
----------------------------- batch 1
(1,1,0) | (1,1,1) | (1,1,2) |
-----------------------------
, where the batch size is 2, N is 3 and K is 2
Because consecutive batches of B are stacked along the K dimension, the stride (batch_stride_B)
between the first elements of two consecutive batches is just k
*/
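// Editorial sketch (not used by the example): the flat offset of element
// (batch_idx, row_idx, col_idx) in the column-major, strided-batched layouts described above.
// The reference kernel further down applies exactly this formula to A, B and C; the helper name
// below is hypothetical and only illustrates the indexing.
inline long long int strided_batched_offset(
    int batch_idx, int row_idx, int col_idx, int ld, long long int batch_stride) {
  return static_cast<long long int>(batch_idx) * batch_stride
       + static_cast<long long int>(col_idx) * ld
       + row_idx;
}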
cudaError_t cutlass_array_sgemm(
int m,
int n,
int k,
float alpha,
float const * const *A,
int lda,
float const * const *B,
int ldb,
float * const *C,
int ldc,
float beta,
int batch_count) {
using Gemm = cutlass::gemm::device::GemmArray<
float, cutlass::layout::ColumnMajor,
float, cutlass::layout::ColumnMajor,
float, cutlass::layout::ColumnMajor
>;
Gemm gemm_op;
cutlass::Status status = gemm_op({
{m, n, k},
A, lda,
B, ldb,
C, ldc,
C, ldc,
{alpha, beta},
batch_count
});
if (status != cutlass::Status::kSuccess) {
return cudaErrorUnknown;
}
return cudaSuccess;
}
cudaError_t cutlass_strided_batched_sgemm(
int m,
int n,
int k,
float alpha,
float const *A,
int lda,
long long int batch_stride_A,
float const *B,
int ldb,
long long int batch_stride_B,
float *C,
int ldc,
long long int batch_stride_C,
float beta,
int batch_count) {
using Gemm = cutlass::gemm::device::GemmBatched<
float, cutlass::layout::ColumnMajor,
float, cutlass::layout::ColumnMajor,
float, cutlass::layout::ColumnMajor
>;
Gemm gemm_op;
cutlass::Status status = gemm_op({
{m, n, k},
{A, lda},
batch_stride_A,
{B, ldb},
batch_stride_B,
{C, ldc},
batch_stride_C,
{C, ldc},
batch_stride_C,
{alpha, beta},
batch_count
});
if (status != cutlass::Status::kSuccess) {
return cudaErrorUnknown;
}
return cudaSuccess;
}
template<typename T>
cudaError_t strided_batched_gemm_nn_reference(
int m,
int n,
int k,
T alpha,
std::vector<T> const &A,
int lda,
long long int batch_stride_A,
std::vector<T> const &B,
int ldb,
long long int batch_stride_B,
std::vector<T> &C,
int ldc,
long long int batch_stride_C,
T beta,
int batch_count) {
/*
strided batched gemm NN
*/
cudaError_t result = cudaSuccess;
if (A.size() < size_t(lda * k * batch_count)) {
std::cout << "the size of A is too small" << std::endl;
return cudaErrorInvalidValue;
}
if (B.size() < size_t(ldb * n)) {
std::cout << "the size of B is too small" << std::endl;
return cudaErrorInvalidValue;
}
if (C.size() < size_t(ldc * n * batch_count)) {
std::cout << "the size of C is too small" << std::endl;
return cudaErrorInvalidValue;
}
for (int batch_idx = 0; batch_idx < batch_count; batch_idx++) {
for (int n_idx = 0; n_idx < n; n_idx++) {
for (int m_idx = 0; m_idx < m; m_idx++) {
T accum = beta * C[batch_idx * batch_stride_C + n_idx * ldc + m_idx];
for (int k_idx = 0; k_idx < k; k_idx++) {
accum += alpha
* A[batch_idx * batch_stride_A + k_idx * lda + m_idx]
* B[batch_idx * batch_stride_B + n_idx * ldb + k_idx];
}
C[batch_idx * batch_stride_C + n_idx * ldc + m_idx] = accum;
}
}
}
return result;
}
cudaError_t run_batched_gemm(bool use_array) {
const char* gemm_desc = use_array ? "array" : "strided batched";
std::cout << "Running " << gemm_desc << " gemm" << std::endl;
// Arbitrary problem size
int const m = 520;
int const n = 219;
int const k = 129;
int const batch_count = 17;
// A, B are non-transpose, column major
int const lda = m;
int const ldb = k * batch_count;
int const ldc = m;
int const count_A = batch_count * lda * k;
int const count_B = ldb * n;
int const count_C = batch_count * ldc * n;
// A and C batches are stored back-to-back, while consecutive batches of B are interleaved
// along the K dimension (hence ldb = k * batch_count and batch_stride_B = k)
long long int batch_stride_A = static_cast<long long int>(lda) * static_cast<long long int>(k);
long long int batch_stride_B = static_cast<long long int>(k);
long long int batch_stride_C = static_cast<long long int>(ldc) * static_cast<long long int>(n);
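// For the sizes above this gives batch_stride_A = 520 * 129 = 67080, batch_stride_B = 129,
// and batch_stride_C = 520 * 219 = 113880 elements.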
// alpha and beta
float alpha = 1.0f;
float beta = 2.0f;
cudaError_t result = cudaSuccess;
// allocate the host memory
std::vector<float> host_A(count_A);
std::vector<float> host_B(count_B);
std::vector<float> host_C(count_C);
std::vector<float> result_C(count_C);
// allocate the device memory
float *A;
float *B;
float *C;
result = cudaMalloc(&A, count_A * sizeof(float));
if (result != cudaSuccess) {
std::cerr << "cudaMalloc result = " << result << std::endl;
return result;
}
result = cudaMalloc(&B, count_B * sizeof(float));
if (result != cudaSuccess) {
std::cerr << "cudaMalloc result = " << result << std::endl;
return result;
}
result = cudaMalloc(&C, count_C * sizeof(float));
if (result != cudaSuccess) {
std::cerr << "cudaMalloc result = " << result << std::endl;
return result;
}
// Limit range to avoid floating-point errors
int const kRange = 8;
// fill A
for (int b_idx = 0; b_idx < batch_count; b_idx++) {
for (int col_idx = 0; col_idx < k; col_idx++) {
for (int row_idx = 0; row_idx < m; row_idx++) {
host_A[row_idx + col_idx * lda + b_idx * lda * k] = static_cast<float>((row_idx + col_idx * lda + b_idx * lda * k) % kRange);
}
}
}
// fill B
for (int b_idx = 0; b_idx < batch_count; b_idx++) {
for (int col_idx = 0; col_idx < n; col_idx++) {
for (int row_idx = 0; row_idx < k; row_idx++) {
host_B[row_idx + col_idx * ldb + b_idx * k] = static_cast<float>(((n + k * ldb + batch_count * k) - (row_idx + col_idx * ldb + b_idx * k)) % kRange);
}
}
}
// fill C
for (int b_idx = 0; b_idx < batch_count; b_idx++) {
for (int col_idx = 0; col_idx < n; col_idx++) {
for (int row_idx = 0; row_idx < m; row_idx++) {
host_C[row_idx + col_idx * ldc + b_idx * ldc * n] = 1.f;
}
}
}
// ref memory
std::vector<float> ref_A(host_A);
std::vector<float> ref_B(host_B);
std::vector<float> ref_C(host_C);
// copy host memory to device
result = cudaMemcpy(A, host_A.data(), count_A * sizeof(float), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
std::cerr << "cudaMemcpy result = " << result << std::endl;
return result;
}
result = cudaMemcpy(B, host_B.data(), count_B * sizeof(float), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
std::cerr << "cudaMemcpy result = " << result << std::endl;
return result;
}
result = cudaMemcpy(C, host_C.data(), count_C * sizeof(float), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
std::cerr << "cudaMemcpy result = " << result << std::endl;
return result;
}
// run cutlass
if (use_array) {
// allocate the host memory for the pointers to the matrices of the batch
std::vector<float*> host_ptr_A(batch_count);
std::vector<float*> host_ptr_B(batch_count);
std::vector<float*> host_ptr_C(batch_count);
// permute the batch elements to emphasize that GemmArray does not depend on matrices being separated by a fixed stride
std::vector<size_t> permutation = {14, 11, 3, 10, 1, 13, 9, 4, 6, 16, 8, 15, 7, 12, 0, 2, 5};
for (size_t b_idx = 0; b_idx < batch_count; b_idx++) {
host_ptr_A[b_idx] = A + permutation[b_idx] * batch_stride_A;
host_ptr_B[b_idx] = B + permutation[b_idx] * batch_stride_B;
host_ptr_C[b_idx] = C + permutation[b_idx] * batch_stride_C;
}
// allocate the corresponding device memory
float const **ptr_A;
float const **ptr_B;
float **ptr_C;
result = cudaMalloc(&ptr_A, batch_count * sizeof(float*));
if (result != cudaSuccess) {
std::cerr << "cudaMalloc result = " << result << std::endl;
return result;
}
result = cudaMalloc(&ptr_B, batch_count * sizeof(float*));
if (result != cudaSuccess) {
std::cerr << "cudaMalloc result = " << result << std::endl;
return result;
}
result = cudaMalloc(&ptr_C, batch_count * sizeof(float*));
if (result != cudaSuccess) {
std::cerr << "cudaMalloc result = " << result << std::endl;
return result;
}
// copy the matrix pointers to the device
result = cudaMemcpy(ptr_A, host_ptr_A.data(), batch_count * sizeof(float*), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
std::cerr << "cudaMemcpy result = " << result << std::endl;
return result;
}
result = cudaMemcpy(ptr_B, host_ptr_B.data(), batch_count * sizeof(float*), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
std::cerr << "cudaMemcpy result = " << result << std::endl;
return result;
}
result = cudaMemcpy(ptr_C, host_ptr_C.data(), batch_count * sizeof(float*), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
std::cerr << "cudaMemcpy result = " << result << std::endl;
return result;
}
result = cutlass_array_sgemm(m, n, k, alpha, ptr_A, lda, ptr_B, ldb, ptr_C, ldc, beta, batch_count);
if (result != cudaSuccess)
return result;
} else {
result = cutlass_strided_batched_sgemm(
m, n, k, alpha, A, lda, batch_stride_A, B, ldb, batch_stride_B, C, ldc, batch_stride_C,
beta, batch_count);
if (result != cudaSuccess)
return result;
}
// copy device memory to host
result = cudaMemcpy(result_C.data(), C, count_C * sizeof(float), cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "cudaMemcpy result = " << result << std::endl;
return result;
}
//compare with reference code
result = strided_batched_gemm_nn_reference(m, n, k, alpha, ref_A, lda, batch_stride_A, ref_B, ldb, batch_stride_B, ref_C, ldc, batch_stride_C,
beta, batch_count);
if (result != cudaSuccess)
return result;
// Expect bit-level accuracy for this simple example
if (ref_C != result_C) {
std::cout << "CUTLASS " << gemm_desc << " gemm does not run correctly" << std::endl;
return cudaErrorUnknown;
}
// free memory
result = cudaFree(A);
if (result != cudaSuccess) {
std::cerr << "cudaFree result = " << result << std::endl;
return result;
}
result = cudaFree(B);
if (result != cudaSuccess) {
std::cerr << "cudaFree result = " << result << std::endl;
return result;
}
result = cudaFree(C);
if (result != cudaSuccess) {
std::cerr << "cudaFree result = " << result << std::endl;
return result;
}
return result;
}
int main() {
cudaError_t result = cudaSuccess;
for (bool use_array : {false, true}) {
result = run_batched_gemm(use_array);
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
} else {
break;
}
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
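// Editorial build sketch (assumption, not part of the original example): built standalone against a
// CUTLASS checkout, something along the lines of
//   nvcc -std=c++17 -arch=sm_80 -I<path-to-cutlass>/include batched_gemm.cu -o 05_batched_gemm
// is typically sufficient, since this file only depends on CUTLASS headers and the CUDA runtime.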
// End of examples/05_batched_gemm/batched_gemm.cu
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
#include "device/b2b_gemm.h"
#include "b2b_gemm_run.h"
#include "test_run.h"
////////////////////////////////////////////////////////////////////////////////
cutlass::gemm::GemmCoord gemm_f16_sm75_problem_size_0(128*640, 64, 576);
cutlass::gemm::GemmCoord gemm_f16_sm75_problem_size_1(128*640, 128, 64);
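// Editorial note: in the back-to-back formulation the output of the first GEMM feeds the A operand
// of the second GEMM, so the K of problem_size_1 (64) matches the N of problem_size_0 (64).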
bool run_nonfused_gemm_f16() {
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
ElementCompute alpha0 = ElementCompute(1);
ElementCompute beta0 = ElementCompute(1); //beta = 1 for bias
ElementCompute alpha1 = ElementCompute(1);
ElementCompute beta1 = ElementCompute(1); //beta = 1 for bias
using ThreadblockShape0 = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape0 = cutlass::gemm::GemmShape<32, 32, 32>;
using ThreadblockShape1 = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape1 = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Gemm0 = cutlass::gemm::device::Gemm<
cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::half_t,
cutlass::layout::ColumnMajor,
ElementOutput,
cutlass::layout::RowMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm75,
ThreadblockShape0,
WarpShape0,
InstructionShape,
cutlass::epilogue::thread::LinearCombinationRelu<
ElementOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::NoBetaScaling
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
2
>;
using Gemm1 = cutlass::gemm::device::Gemm<
cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::half_t,
cutlass::layout::ColumnMajor,
ElementOutput,
cutlass::layout::RowMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm75,
ThreadblockShape1,
WarpShape1,
InstructionShape,
cutlass::epilogue::thread::LinearCombinationRelu<
ElementOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::NoBetaScaling
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
2
>;
B2bNonFusedGemmRun<Gemm0, Gemm1> nonFusedGemm;
std::cout << "Running Non-fused back-to-back FP16 TN GEMMs...\n";
bool pass = nonFusedGemm.run(gemm_f16_sm75_problem_size_0, gemm_f16_sm75_problem_size_1, alpha0, beta0, alpha1, beta1);
if(pass)
std::cout << "Pass\n";
else
std::cout << "Fail\n";
return pass;
}
bool run_fused_gemm_f16_rf_res() {
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
ElementCompute alpha0 = ElementCompute(1);
//Fused kernel has built-in bias, setting beta=0
ElementCompute beta0 = ElementCompute(0);
ElementCompute alpha1 = ElementCompute(1);
ElementCompute beta1 = ElementCompute(1); //beta=1 for bias
using ThreadblockShape0 = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape0 = cutlass::gemm::GemmShape<32, 64, 32>;
using ThreadblockShape1 = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape1 = cutlass::gemm::GemmShape<32, 128, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using EpilogueOutputOp0 =
cutlass::epilogue::thread::LinearCombinationRelu<
ElementOutput,
InstructionShape::kM * InstructionShape::kN / 32,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>;
using EpilogueOutputOp1 =
cutlass::epilogue::thread::LinearCombinationRelu<
ElementOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::NoBetaScaling
>;
using B2bGemm = cutlass::gemm::device::B2bGemm<
cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::half_t,
cutlass::layout::ColumnMajor,
ElementOutput,
cutlass::layout::RowMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm75,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
2
>;
B2bFusedGemmRun<B2bGemm> fusedGemm;
std::cout << "Running Fused back-to-back FP16 TN GEMMs with RF Residency...\n";
bool passed = fusedGemm.run(gemm_f16_sm75_problem_size_0, gemm_f16_sm75_problem_size_1, alpha0, beta0, alpha1, beta1);
if(passed)
std::cout << "Pass\n";
else
std::cout << "Fail\n";
return passed;
}
int main() {
std::vector<bool (*)()> funcs = {
&run_nonfused_gemm_f16,
&run_fused_gemm_f16_rf_res
};
return testRun(75, funcs, "gemm f16 RF residency");
}
///////////////////////////////////////////////////////////////////////////////
// End of examples/13_two_tensor_op_fusion/fused_two_gemms_f16_sm75_rf.cu
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h"
#include "cutlass/transform/threadblock/predicated_vector_access_iterator.h"
#include "cutlass/transform/threadblock/vector_iterator.h"
#include "cutlass/transform/warp/vector_fragment_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h"
#include "kernel/default_b2b_conv2d_fprop.h"
#include "kernel/b2b_implicit_gemm_convolution.h"
#include "threadblock/b2b_implicit_gemm_multistage_smem_accumulator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage
/// pipeline.
/// Accumulator will be staged in shared memory.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape0,
typename ThreadblockShape1,
typename WarpShape0,
typename WarpShape1,
typename InstructionShape,
typename EpilogueOutputOp0,
typename EpilogueOutputOp1,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag
>
struct DefaultB2bConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
true
> {
// Define the core components from GEMM
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA0 = typename MmaCore0::IteratorThreadMapA;
using IteratorA0 =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
ElementA, LayoutA,
ThreadMapA0
>;
using SmemIteratorA0 = typename MmaCore0::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB0 = typename MmaCore0::IteratorThreadMapB;
using IteratorB0 =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
ElementB, LayoutB,
ThreadMapB0
>;
using SmemIteratorB0 = typename MmaCore0::SmemIteratorB;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 2;
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape0::kM, WarpShape0::kN>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
// Define iterators over tiles from the B operand
using ThreadMapB1 = typename MmaCore1::IteratorThreadMapB;
using IteratorB1 =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
ElementB, LayoutB,
ThreadMapB1
>;
using SmemIteratorB1 = typename MmaCore1::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp0 = typename MmaCore0::MmaTensorOp;
using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
using MmaPolicy0 = typename MmaCore0::MmaPolicy;
using MmaPolicy1 = typename MmaCore1::MmaPolicy;
// Use fragment iterator for the accumulator
using SmemAccumulatorLayout = cutlass::layout::RowMajor;
using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
WarpShape0, InstructionShape,
ElementAccumulator,
typename WarpMmaTensorOp0::Policy::Operator::FragmentC,
SmemAccumulatorLayout
>;
// Store Accumulator tiles to Shared Memory
using SmemIteratorD0 =
cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape0,
InstructionShape,
ElementC,
SmemAccumulatorLayout
>;
static int const kThreadCount = 32;
// load warp tile from Shared Memory accumulator
using WarpIteratorA1 = cutlass::gemm::warp::MmaTensorOpMultiplicandTileIterator<
MatrixShape<WarpShape1::kM, InstructionShape::kK>, cutlass::gemm::Operand::kA,
ElementA, SmemAccumulatorLayout,
MatrixShape<InstructionShape::kM, InstructionShape::kK>,
WarpMmaTensorOp1::Policy::OpDelta::kRow, kThreadCount>;
// Define the Mma
using B2bMma = threadblock::B2bImplicitGemmMultistageSmemAccumulator<
ThreadblockShape0,
IteratorA0,
SmemIteratorA0,
arch::CacheOperation::Always,
IteratorB0,
SmemIteratorB0,
arch::CacheOperation::Global,
IteratorAccumulatorScaleBias,
FragmentIteratorAccumulator,
SmemIteratorD0,
ThreadblockShape1,
WarpIteratorA1,
IteratorB1,
SmemIteratorB1,
arch::CacheOperation::Global,
EpilogueOutputOp0,
MmaPolicy0,
MmaPolicy1,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape1,
WarpMmaTensorOp1,
1,
EpilogueOutputOp1,
EpilogueOutputOp1::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution<
B2bMma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
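/////////////////////////////////////////////////////////////////////////////////////////////////
// Editorial usage sketch (illustrative only): the element types, layouts, tile shapes and epilogue
// parameters below are assumptions chosen to show how the specialization above is selected; they
// are not prescribed by this header.
//
//   using EpilogueOp0 = cutlass::epilogue::thread::LinearCombinationRelu<
//       cutlass::half_t, 8, cutlass::half_t, cutlass::half_t,
//       cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling>;
//   using EpilogueOp1 = cutlass::epilogue::thread::LinearCombinationRelu<
//       cutlass::half_t, 8, cutlass::half_t, cutlass::half_t,
//       cutlass::epilogue::thread::ScaleType::NoBetaScaling>;
//
//   using B2bFpropKernel = cutlass::conv::kernel::DefaultB2bConv2dFprop<
//       cutlass::half_t, cutlass::layout::TensorNHWC,              // ElementA, LayoutA
//       cutlass::half_t, cutlass::layout::TensorNHWC,              // ElementB, LayoutB
//       cutlass::half_t, cutlass::layout::TensorNHWC,              // ElementC, LayoutC
//       cutlass::half_t,                                           // ElementAccumulator
//       cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<64, 64, 32>,                      // ThreadblockShape0
//       cutlass::gemm::GemmShape<64, 128, 32>,                     // ThreadblockShape1
//       cutlass::gemm::GemmShape<32, 32, 32>,                      // WarpShape0
//       cutlass::gemm::GemmShape<32, 64, 32>,                      // WarpShape1
//       cutlass::gemm::GemmShape<16, 8, 16>,                       // InstructionShape
//       EpilogueOp0, EpilogueOp1,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
//       3,                                                         // Stages
//       cutlass::arch::OpMultiplyAdd,
//       cutlass::conv::IteratorAlgorithm::kAnalytic,
//       true                                                       // stage accumulator in shared memory
//   >::Kernel;
//
// The nested ::Kernel alias is the B2bImplicitGemmConvolution kernel defined by each specialization.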
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage
/// pipeline with interleaved layout.
/// Accumulator will be staged in shared memory.
template <
typename ElementA,
typename ElementB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape0,
typename ThreadblockShape1,
typename WarpShape0,
typename WarpShape1,
typename InstructionShape,
typename EpilogueOutputOp0,
typename EpilogueOutputOp1,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int InterleavedK
>
struct DefaultB2bConv2dFprop <
ElementA,
layout::TensorNCxHWx<InterleavedK>,
ElementB,
layout::TensorCxRSKx<InterleavedK>,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
true
> {
// Define the core components from GEMM
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
Stages, MathOperatorTag, true>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
Stages, MathOperatorTag, true>;
// Define iterators over tiles from the A operand
// Note GEMM shared memory threadmap is used here because conv global memory
// layout needs to be mapped to fprop which is similar to the crosswise
// layout which is used by the interleaved GEMM shared memory threadmap.
// The Interleaved GEMM global memory layout is similar to the congruous
// layout.
using ThreadMapA0 = typename MmaCore0::SmemThreadMapA;
using IteratorA0 =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
ElementA, layout::TensorNCxHWx<InterleavedK>,
ThreadMapA0
>;
using SmemIteratorA0 = typename MmaCore0::SmemIteratorA;
// Define iterators over tiles from the B operand
// Note GEMM shared memory threadmap is used here because conv global memory
// layout needs to be mapped to fprop which is similar to the crosswise
// layout which is used by the interleaved GEMM shared memory threadmap.
// The Interleaved GEMM global memory layout is similar to the congruous
// layout.
using ThreadMapB0 = typename MmaCore0::SmemThreadMapB;
using IteratorB0 =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB0
>;
using SmemIteratorB0 = typename MmaCore0::SmemIteratorB;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 4;
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape0::kM, WarpShape0::kN>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
using ThreadMapB1 = typename MmaCore1::SmemThreadMapB;
using IteratorB1 =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB1
>;
using SmemIteratorB1 = typename MmaCore1::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp0 = typename MmaCore0::MmaTensorOp;
using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
using MmaPolicy0 = typename MmaCore0::MmaPolicy;
using MmaPolicy1 = typename MmaCore1::MmaPolicy;
// Use fragment iterator for the accumulator
using SmemAccumulatorLayout = cutlass::layout::ColumnMajorInterleaved<16>;
using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
WarpShape0, InstructionShape,
ElementAccumulator,
typename WarpMmaTensorOp0::Policy::Operator::FragmentC,
SmemAccumulatorLayout
>;
// Store Accumulator tiles to Shared Memory
using SmemIteratorD0 =
cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape0,
InstructionShape,
ElementC,
SmemAccumulatorLayout
>;
static int const kThreadCount = 32;
// load warp tile from Shared Memory accumulator
using WarpIteratorA1 = cutlass::gemm::warp::MmaTensorOpMultiplicandTileIteratorCanonical<
MatrixShape<WarpShape1::kM, InstructionShape::kK>, cutlass::gemm::Operand::kA,
ElementA, SmemAccumulatorLayout,
MatrixShape<InstructionShape::kM, InstructionShape::kK>,
WarpMmaTensorOp1::Policy::OpDelta::kRow, kThreadCount>;
// Define the Mma
using B2bMma = threadblock::B2bImplicitGemmMultistageSmemAccumulator<
ThreadblockShape0,
IteratorA0,
SmemIteratorA0,
arch::CacheOperation::Always,
IteratorB0,
SmemIteratorB0,
arch::CacheOperation::Global,
IteratorAccumulatorScaleBias,
FragmentIteratorAccumulator,
SmemIteratorD0,
ThreadblockShape1,
WarpIteratorA1,
IteratorB1,
SmemIteratorB1,
arch::CacheOperation::Global,
EpilogueOutputOp0,
MmaPolicy0,
MmaPolicy1,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
ThreadblockShape1,
WarpMmaTensorOp1,
1,
EpilogueOutputOp1,
EpilogueOutputOp1::kCount,
InterleavedK
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution<
B2bMma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and
/// multistage pipeline.
/// Accumulator will be staged in shared memory.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape0,
typename ThreadblockShape1,
typename WarpShape0,
typename WarpShape1,
typename InstructionShape,
typename EpilogueOutputOp0,
typename EpilogueOutputOp1,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag
>
struct DefaultB2bConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
true
> {
// Define the core components from GEMM
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA0 = typename MmaCore0::IteratorThreadMapA;
using IteratorA0 =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
ElementA, LayoutA,
ThreadMapA0
>;
using SmemIteratorA0 = typename MmaCore0::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB0 = typename MmaCore0::IteratorThreadMapB;
using IteratorB0 =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
ElementB, LayoutB,
ThreadMapB0
>;
using SmemIteratorB0 = typename MmaCore0::SmemIteratorB;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 2;
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape0::kM, WarpShape0::kN>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
// Define iterators over tiles from the B operand
using ThreadMapB1 = typename MmaCore1::IteratorThreadMapB;
using IteratorB1 =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
ElementB, LayoutB,
ThreadMapB1
>;
using SmemIteratorB1 = typename MmaCore1::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp0 = typename MmaCore0::MmaTensorOp;
using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
using MmaPolicy0 = typename MmaCore0::MmaPolicy;
using MmaPolicy1 = typename MmaCore1::MmaPolicy;
// Use fragment iterator for the accumulator
using SmemAccumulatorLayout = cutlass::layout::RowMajor;
using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
WarpShape0, InstructionShape,
ElementAccumulator,
typename WarpMmaTensorOp0::Policy::Operator::FragmentC,
SmemAccumulatorLayout
>;
// Store Accumulator tiles to Shared Memory
using SmemIteratorD0 =
cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape0,
InstructionShape,
ElementC,
SmemAccumulatorLayout
>;
static int const kThreadCount = 32;
// load warp tile from Shared Memory accumulator
using WarpIteratorA1 = cutlass::gemm::warp::MmaTensorOpMultiplicandTileIterator<
MatrixShape<WarpShape1::kM, InstructionShape::kK>, cutlass::gemm::Operand::kA,
ElementA, SmemAccumulatorLayout,
MatrixShape<InstructionShape::kM, InstructionShape::kK>,
WarpMmaTensorOp1::Policy::OpDelta::kRow, kThreadCount>;
// Define the Mma
using B2bMma = threadblock::B2bImplicitGemmMultistageSmemAccumulator<
ThreadblockShape0,
IteratorA0,
SmemIteratorA0,
arch::CacheOperation::Always,
IteratorB0,
SmemIteratorB0,
arch::CacheOperation::Global,
IteratorAccumulatorScaleBias,
FragmentIteratorAccumulator,
SmemIteratorD0,
ThreadblockShape1,
WarpIteratorA1,
IteratorB1,
SmemIteratorB1,
arch::CacheOperation::Global,
EpilogueOutputOp0,
MmaPolicy0,
MmaPolicy1,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape1,
WarpMmaTensorOp1,
1,
EpilogueOutputOp1,
EpilogueOutputOp1::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution<
B2bMma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and
/// multistage pipeline with interleaved layout.
/// Accumulator will be staged in shared memory.
template <
typename ElementA,
typename ElementB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape0,
typename ThreadblockShape1,
typename WarpShape0,
typename WarpShape1,
typename InstructionShape,
typename EpilogueOutputOp0,
typename EpilogueOutputOp1,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int InterleavedK
>
struct DefaultB2bConv2dFprop <
ElementA,
layout::TensorNCxHWx<InterleavedK>,
ElementB,
layout::TensorCxRSKx<InterleavedK>,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
true
> {
// Define the core components from GEMM
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
Stages, MathOperatorTag, true>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
Stages, MathOperatorTag, true>;
// Define iterators over tiles from the A operand
// Note GEMM shared memory threadmap is used here because conv global memory
// layout needs to be mapped to fprop which is similar to the crosswise
// layout which is used by the interleaved GEMM shared memory threadmap.
// The Interleaved GEMM global memory layout is similar to the congruous
// layout.
using ThreadMapA0 = typename MmaCore0::SmemThreadMapA;
using IteratorA0 =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
ElementA, layout::TensorNCxHWx<InterleavedK>,
ThreadMapA0
>;
using SmemIteratorA0 = typename MmaCore0::SmemIteratorA;
// Define iterators over tiles from the B operand
// Note GEMM shared memory threadmap is used here because conv global memory
// layout needs to be mapped to fprop which is similar to the crosswise
// layout which is used by the interleaved GEMM shared memory threadmap.
// The Interleaved GEMM global memory layout is similar to the congruous
// layout.
using ThreadMapB0 = typename MmaCore0::SmemThreadMapB;
using IteratorB0 =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB0
>;
using SmemIteratorB0 = typename MmaCore0::SmemIteratorB;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 4;
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape0::kM, WarpShape0::kN>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
using ThreadMapB1 = typename MmaCore1::SmemThreadMapB;
using IteratorB1 =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB1
>;
using SmemIteratorB1 = typename MmaCore1::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp0 = typename MmaCore0::MmaTensorOp;
using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
using MmaPolicy0 = typename MmaCore0::MmaPolicy;
using MmaPolicy1 = typename MmaCore1::MmaPolicy;
// Use fragment iterator for the accumulator
using SmemAccumulatorLayout = cutlass::layout::ColumnMajorInterleaved<16>;
using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
WarpShape0, InstructionShape,
ElementAccumulator,
typename WarpMmaTensorOp0::Policy::Operator::FragmentC,
SmemAccumulatorLayout
>;
// Store Accumulator tiles to Shared Memory
using SmemIteratorD0 =
cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape0,
InstructionShape,
ElementC,
SmemAccumulatorLayout
>;
static int const kThreadCount = 32;
// load warp tile from Shared Memory accumulator
using WarpIteratorA1 = cutlass::gemm::warp::MmaTensorOpMultiplicandTileIteratorCanonical<
MatrixShape<WarpShape1::kM, InstructionShape::kK>, cutlass::gemm::Operand::kA,
ElementA, SmemAccumulatorLayout,
MatrixShape<InstructionShape::kM, InstructionShape::kK>,
WarpMmaTensorOp1::Policy::OpDelta::kRow, kThreadCount>;
// Define the Mma
using B2bMma = threadblock::B2bImplicitGemmMultistageSmemAccumulator<
ThreadblockShape0,
IteratorA0,
SmemIteratorA0,
arch::CacheOperation::Always,
IteratorB0,
SmemIteratorB0,
arch::CacheOperation::Global,
IteratorAccumulatorScaleBias,
FragmentIteratorAccumulator,
SmemIteratorD0,
ThreadblockShape1,
WarpIteratorA1,
IteratorB1,
SmemIteratorB1,
arch::CacheOperation::Global,
EpilogueOutputOp0,
MmaPolicy0,
MmaPolicy1,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
ThreadblockShape1,
WarpMmaTensorOp1,
1,
EpilogueOutputOp1,
EpilogueOutputOp1::kCount,
InterleavedK
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution<
B2bMma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
// End of examples/13_two_tensor_op_fusion/kernel/default_b2b_conv2d_fprop_smem_accumulator_sm80.h
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h"
#include "cutlass/transform/threadblock/predicated_vector_access_iterator.h"
#include "cutlass/transform/threadblock/vector_iterator.h"
#include "cutlass/transform/warp/vector_fragment_iterator.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h"
#include "threadblock/b2b_mma_pipelined.h"
#include "threadblock/b2b_mma_multistage.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Staging the accumulators in shared memory.
bool SmemAccumulator = false>
struct DefaultB2bMma;
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output with 2-stage pipeline
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator,
/// Epilogue output operator
typename EpilogueOutputOp>
struct DefaultB2bMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, ArchTag,
ThreadblockShape0, ThreadblockShape1,
WarpShape0, WarpShape1,
InstructionShape, 2, Operator, EpilogueOutputOp, false> {
// Define the MmaCore components
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, 2, Operator>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA0 =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore0::Shape::kM, MmaCore0::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore0::IteratorThreadMapA, kAlignmentA>;
// Define iterators over tiles from the B operand
using IteratorB0 =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore0::Shape::kK, MmaCore0::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore0::IteratorThreadMapB, kAlignmentB>;
// Use fragment iterator for A operand
using AccumulatorLayout = cutlass::layout::ColumnMajor;
using FragmentIteratorA1 =
cutlass::gemm::warp::MmaTensorOpFragmentIterator<
cutlass::MatrixShape<MmaCore1::WarpShape::kM, MmaCore1::InstructionShape::kK>, //warp shape
cutlass::MatrixShape<MmaCore0::WarpShape::kM, MmaCore0::WarpShape::kN>, //accumulator shape
MmaCore1::Shape::kK, //kBlocksColumn
ElementAccumulator, ElementA, AccumulatorLayout, InstructionShape, EpilogueOutputOp>;
using ElementScaleBias = typename EpilogueOutputOp::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 2;
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape1::kM, WarpShape1::kK>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
// Warp-level iterators to load scale and bias vectors
using FragmentIteratorA1ScaleBias = cutlass::transform::warp::VectorFragmentIterator<
MatrixShape<1, IteratorAccumulatorScaleBias::Fragment::kElements>, ElementScaleBias,
LayoutScaleBias, InstructionShape, kElementsPerAccess>;
// Define iterators over tiles from the B operand
using IteratorB1 =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore1::Shape::kK, MmaCore1::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore1::IteratorThreadMapB, kAlignmentB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockB2bMma = cutlass::gemm::threadblock::B2bMmaPipelined<
typename MmaCore0::Shape, IteratorA0, typename MmaCore0::SmemIteratorA,
IteratorB0, typename MmaCore0::SmemIteratorB,
typename MmaCore1::Shape, FragmentIteratorA1,
IteratorAccumulatorScaleBias, FragmentIteratorA1ScaleBias,
IteratorB1, typename MmaCore1::SmemIteratorB,
ElementAccumulator, layout::RowMajor,
EpilogueOutputOp,
typename MmaCore0::MmaPolicy, typename MmaCore1::MmaPolicy>;
};
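// Editorial note: in this 2-stage specialization the accumulator produced by the first warp-level
// MMA (MmaCore0) is consumed directly as the A operand of the second GEMM through
// FragmentIteratorA1, with per-channel scale/bias vectors streamed by IteratorAccumulatorScaleBias
// and FragmentIteratorA1ScaleBias and applied via EpilogueOutputOp between the two GEMMs; only the
// B1 operand of the second GEMM is staged through shared memory (MmaCore1::SmemIteratorB).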
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output for multi-stage
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the multistage mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Epilogue output operator
typename EpilogueOutputOp>
struct DefaultB2bMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, ArchTag,
ThreadblockShape0, ThreadblockShape1,
WarpShape0, WarpShape1,
InstructionShape, Stages, Operator, EpilogueOutputOp, false> {
static cutlass::arch::CacheOperation::Kind const CacheOpA =
((sizeof_bits<ElementA>::value * kAlignmentA) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * kAlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the MmaCore components
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, Operator, false, CacheOpA, CacheOpB>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, Operator, false, CacheOpA, CacheOpB>;
// Define iterators over tiles from the A operand
using ThreadMapA0 = typename MmaCore0::IteratorThreadMapA;
using AccessTypeA0 = cutlass::Array<ElementA, kAlignmentA>;
using IteratorA0 =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
ElementA, LayoutA, 1, ThreadMapA0, AccessTypeA0>;
// Define iterators over tiles from the B operand
using ThreadMapB0 = typename MmaCore0::IteratorThreadMapB;
using AccessTypeB0 = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB0 =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
ElementB, LayoutB, 0, ThreadMapB0, AccessTypeB0>;
// Use fragment iterator for A operand
using AccumulatorLayout = cutlass::layout::ColumnMajor;
using FragmentIteratorA1 =
cutlass::gemm::warp::MmaTensorOpFragmentIterator<
cutlass::MatrixShape<MmaCore1::WarpShape::kM, MmaCore1::InstructionShape::kK>, //warp shape
cutlass::MatrixShape<MmaCore0::WarpShape::kM, MmaCore0::WarpShape::kN>, //accumulator shape
MmaCore1::Shape::kK, //kBlocksColumn
ElementAccumulator, ElementA, AccumulatorLayout, InstructionShape, EpilogueOutputOp>;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 2;
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape1::kM, WarpShape1::kK>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
// Warp-level iterators to load scale and bias vectors
using FragmentIteratorA1ScaleBias = cutlass::transform::warp::VectorFragmentIterator<
MatrixShape<1, IteratorAccumulatorScaleBias::Fragment::kElements>, ElementScaleBias,
LayoutScaleBias, InstructionShape, kElementsPerAccess>;
// Define iterators over tiles from the B operand
using ThreadMapB1 = typename MmaCore1::IteratorThreadMapB;
using AccessTypeB1 = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB1 =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
ElementB, LayoutB, 0, ThreadMapB1, AccessTypeB1>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockB2bMma = cutlass::gemm::threadblock::B2bMmaMultistage<
typename MmaCore0::Shape, IteratorA0, typename MmaCore0::SmemIteratorA,
MmaCore0::kCacheOpA,
IteratorB0, typename MmaCore0::SmemIteratorB, MmaCore0::kCacheOpB,
typename MmaCore1::Shape, FragmentIteratorA1,
IteratorAccumulatorScaleBias, FragmentIteratorA1ScaleBias,
IteratorB1, typename MmaCore1::SmemIteratorB, MmaCore1::kCacheOpB,
ElementAccumulator, layout::RowMajor,
EpilogueOutputOp,
typename MmaCore0::MmaPolicy, typename MmaCore1::MmaPolicy, Stages>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for column-major-interleaved output with 2-stage pipeline
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
    /// Operator class tag
typename OperatorClass,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Number of Interleaved K
int InterleavedK>
struct DefaultB2bMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, arch::Sm75,
ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1,
InstructionShape, 2, Operator, EpilogueOutputOp, true> {
// Define the MmaCore components
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, 2, Operator,
true>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, 2, Operator,
true>;
static_assert(kAlignmentA == 128 / sizeof_bits<ElementA>::value,
"Alignment must match thread data map's vector length");
  static_assert(kAlignmentB == 128 / sizeof_bits<ElementB>::value,
"Alignment must match thread data map's vector length");
// Define iterators over tiles from the A operand
using IteratorA0 = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore0::Shape::kM, MmaCore0::Shape::kK>, ElementA,
LayoutA, 1, typename MmaCore0::IteratorThreadMapA>;
// Define iterators over tiles from the B operand
using IteratorB0 = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore0::Shape::kK, MmaCore0::Shape::kN>, ElementB,
LayoutB, 0, typename MmaCore0::IteratorThreadMapB>;
// Use fragment iterator for A1 operand
using AccumulatorLayout = cutlass::layout::RowMajor; //AccumulatorsInRowMajor = true
using FragmentIteratorA1 =
cutlass::gemm::warp::MmaTensorOpFragmentIterator<
cutlass::MatrixShape<MmaCore1::WarpShape::kM, MmaCore1::InstructionShape::kK>, //warp shape
cutlass::MatrixShape<MmaCore0::WarpShape::kM, MmaCore0::WarpShape::kN>, //accumulator shape
MmaCore1::Shape::kK, //kBlocksColumn
ElementAccumulator, ElementA, AccumulatorLayout,
InstructionShape, EpilogueOutputOp>;
using ElementScaleBias = typename EpilogueOutputOp::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 4;
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape1::kM, WarpShape1::kK>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
// Warp-level iterators to load scale and bias vectors
using FragmentIteratorA1ScaleBias = cutlass::transform::warp::VectorFragmentIterator<
MatrixShape<1, IteratorAccumulatorScaleBias::Fragment::kElements>, ElementScaleBias,
LayoutScaleBias, InstructionShape, kElementsPerAccess>;
// Define iterators over tiles from the B operand
using IteratorB1 =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore1::Shape::kK, MmaCore1::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore1::IteratorThreadMapB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockB2bMma = cutlass::gemm::threadblock::B2bMmaPipelined<
typename MmaCore0::Shape, IteratorA0, typename MmaCore0::SmemIteratorA,
IteratorB0, typename MmaCore0::SmemIteratorB,
typename MmaCore1::Shape, FragmentIteratorA1,
IteratorAccumulatorScaleBias, FragmentIteratorA1ScaleBias,
IteratorB1, typename MmaCore1::SmemIteratorB,
ElementAccumulator, layout::ColumnMajorInterleaved<InterleavedK>,
EpilogueOutputOp,
typename MmaCore0::MmaPolicy, typename MmaCore1::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for column-major-interleaved output with multi-stage
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
    /// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the multistage mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Number of Interleaved K
int InterleavedK>
struct DefaultB2bMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, ArchTag,
ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1,
InstructionShape, Stages, Operator, EpilogueOutputOp, true> {
// Define the MmaCore components
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, Stages,
Operator, true>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, Stages,
Operator, true>;
// Define iterators over tiles from the A operand
using ThreadMapA0 = typename MmaCore0::IteratorThreadMapA;
using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
using IteratorA0 =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
ElementA, LayoutA, 1, ThreadMapA0, AccessTypeA>;
// Define iterators over tiles from the B operand
using ThreadMapB0 = typename MmaCore0::IteratorThreadMapB;
using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB0 =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
          cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
ElementB, LayoutB, 0, ThreadMapB0, AccessTypeB>;
// Use fragment iterator for A1 operand
using AccumulatorLayout = cutlass::layout::RowMajor; //AccumulatorsInRowMajor = true
using FragmentIteratorA1 =
cutlass::gemm::warp::MmaTensorOpFragmentIterator<
cutlass::MatrixShape<MmaCore1::WarpShape::kM, MmaCore1::InstructionShape::kK>, //warp shape
cutlass::MatrixShape<MmaCore0::WarpShape::kM, MmaCore0::WarpShape::kN>, //accumulator shape
MmaCore1::Shape::kK, //kBlocksColumn
ElementAccumulator, ElementA, AccumulatorLayout,
InstructionShape, EpilogueOutputOp>;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 4;
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape1::kM, WarpShape1::kK>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
// Warp-level iterators to load scale and bias vectors
using FragmentIteratorA1ScaleBias = cutlass::transform::warp::VectorFragmentIterator<
MatrixShape<1, IteratorAccumulatorScaleBias::Fragment::kElements>, ElementScaleBias,
LayoutScaleBias, InstructionShape, kElementsPerAccess>;
// Define iterators over tiles from the B operand
using ThreadMapB1 = typename MmaCore1::IteratorThreadMapB;
using IteratorB1 =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
ElementB, LayoutB, 0, ThreadMapB1, AccessTypeB>;
// Define the threadblock-scoped multistage matrix multiply
using ThreadblockB2bMma = cutlass::gemm::threadblock::B2bMmaMultistage<
typename MmaCore0::Shape, IteratorA0, typename MmaCore0::SmemIteratorA,
MmaCore0::kCacheOpA,
IteratorB0, typename MmaCore0::SmemIteratorB, MmaCore0::kCacheOpB,
typename MmaCore1::Shape, FragmentIteratorA1,
IteratorAccumulatorScaleBias, FragmentIteratorA1ScaleBias,
IteratorB1, typename MmaCore1::SmemIteratorB, MmaCore1::kCacheOpB,
ElementAccumulator, layout::ColumnMajorInterleaved<InterleavedK>,
EpilogueOutputOp,
typename MmaCore0::MmaPolicy, typename MmaCore1::MmaPolicy, Stages>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/threadblock/default_b2b_mma.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/threadblock/default_b2b_mma.h",
"repo_id": "examples",
"token_count": 9284
} | 3 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example requires NVIDIA Ampere GPU or later.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// CUTLASS Includes
#include "cutlass/cutlass.h"
#include "cutlass/functional.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
// CUTLASS Utility Includes
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/gemm_complex.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
// Define the overall warp-level problem shape
int const kM = 27;
int const kN = 31;
int const kK = 17;
///////////////////////////////////////////////////////////////////////////////////////////////////
// Define a warp-level GEMM operator.
//
// This template could be part of the CUTLASS Template Library or implemented internally. This
// wraps the matrix multiply operation and epilogue with a GEMM-like interface that can be
// instantiated in device code.
namespace cutlass {
namespace gemm {
namespace warp {
template <
typename Shape,
typename InstructionShape,
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementScalar
>
class GemmTensorOp {
public:
using WarpShape = GemmShape<
((Shape::kM + InstructionShape::kM - 1) / InstructionShape::kM) * InstructionShape::kM,
((Shape::kN + InstructionShape::kN - 1) / InstructionShape::kN) * InstructionShape::kN,
InstructionShape::kK
>;
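  // The M and N extents are rounded up to whole multiples of the instruction shape; for the
  // 27x31x17 problem and the 8x8x4 instruction shape used in this example, this yields a
  // 32x32x4 warp shape.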
using MmaWarp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape,
InstructionShape,
double, // Data type of A elements
cutlass::layout::RowMajor, // Layout of A matrix
double, // Data type of B elements
cutlass::layout::ColumnMajor, // Layout of B matrix
double, // Data type of C elements
cutlass::layout::RowMajor // Layout of C matrix
>::Type;
// Number of 'K groups'
int const kKgroups = (Shape::kK + InstructionShape::kK - 1) / InstructionShape::kK;
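  // (ceiling division: e.g. kK = 17 with InstructionShape::kK = 4 gives kKgroups = 5)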
// Define a 'FragmentIterator' to iterate over slices of accumulators
using FragmentIterator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename MmaWarp::Shape,
InstructionShape,
double,
typename MmaWarp::Policy::Operator::FragmentC,
cutlass::layout::RowMajor
>;
  // Define an epilogue 'Tile Iterator' to iterate over slices of elements in Shared Memory
using AccumulatorTileIterator = cutlass::epilogue::warp::TileIteratorTensorOpCanonical<
typename MmaWarp::Shape,
InstructionShape,
double,
cutlass::layout::RowMajor
>;
using TensorRefA = typename MmaWarp::IteratorA::TensorRef;
using TensorRefB = typename MmaWarp::IteratorB::TensorRef;
using TensorRefC = typename AccumulatorTileIterator::TensorRef;
public:
CUTLASS_HOST_DEVICE
GemmTensorOp() { }
CUTLASS_DEVICE
void operator()(
ElementScalar alpha,
TensorRefA ref_A,
TensorRefB ref_B,
ElementScalar beta,
TensorRefC ref_C,
TensorRefC ref_D,
int lane_id) const {
// Instantiate iterators pointing to slices of the A and B matrices in shared memory
typename MmaWarp::IteratorA iter_A(ref_A, {Shape::kM, Shape::kK}, lane_id);
typename MmaWarp::IteratorB iter_B(ref_B, {Shape::kK, Shape::kN}, lane_id);
// Instantiate and clear accumulator tile holding the C matrix
typename MmaWarp::FragmentC accum;
accum.clear();
// Instantiate the warp-level matrix multiply operator
MmaWarp mma_op;
// Instantiate fragments holding the slice of the matrix held by each warp
typename MmaWarp::FragmentA frag_A[2];
typename MmaWarp::FragmentB frag_B[2];
// Load fragments from shared memory
iter_A.load(frag_A[0]);
iter_B.load(frag_B[0]);
++iter_A;
++iter_B;
    // Mainloop over the K groups: load the next fragments while computing with the current ones
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < kKgroups; ++k) {
// Load fragments from shared memory
iter_A.load(frag_A[(k + 1) % 2]);
iter_B.load(frag_B[(k + 1) % 2]);
++iter_A;
++iter_B;
// Compute the matrix multiply
mma_op(accum, frag_A[k % 2], frag_B[k % 2], accum);
}
// Instantiate iterators
FragmentIterator accum_frag_it(accum);
AccumulatorTileIterator source_tile_it(ref_C, {Shape::kM, Shape::kN}, lane_id);
AccumulatorTileIterator dest_tile_it(ref_D, {Shape::kM, Shape::kN}, lane_id);
// Define function objects for linear scaling operation
cutlass::multiplies<typename FragmentIterator::Fragment> mul_source;
cutlass::multiply_add<typename FragmentIterator::Fragment> mul_add_accumulator;
// Iterate over the epilogue components
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < FragmentIterator::kIterations; ++idx) {
// Define storage for slices of the accumulators
typename FragmentIterator::Fragment accum_fragment;
typename FragmentIterator::Fragment source_fragment;
// Select a slice of accumulators from the accumulator tile
accum_frag_it.load(accum_fragment);
++accum_frag_it;
// Load a corresponding slice from Shared memory
source_tile_it.load(source_fragment);
++source_tile_it;
// Compute linear scaling - alpha * AB + beta * C
source_fragment = mul_source(beta, source_fragment);
accum_fragment = mul_add_accumulator(alpha, accum_fragment, source_fragment);
// Store the result to shared memory
dest_tile_it.store(accum_fragment);
++dest_tile_it;
}
}
};
} // namespace warp
} // namespace gemm
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
// Sample kernel demonstrating a collective GEMM operation by a warp on arbitrary matrices held
// in Shared Memory.
__global__ void kernel(
double *D_gmem,
double alpha,
double const *A_gmem,
double const *B_gmem,
double beta,
double const *C_gmem) {
// Define several matrices in shared memory
__shared__ double A[kM][kK];
__shared__ double B[kN][kK];
__shared__ double C[kM][kN];
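  // Note that B is column-major: each of its kN columns (kK elements long) is stored contiguously.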
// Copy data into SMEM
if (threadIdx.x == 0) {
CUTLASS_PRAGMA_NO_UNROLL
for (int m = 0; m < kM; ++m) {
for (int k = 0; k < kK; ++k) {
A[m][k] = A_gmem[m * kK + k];
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int n = 0; n < kN; ++n) {
for (int k = 0; k < kK; ++k) {
B[n][k] = B_gmem[n * kK + k];
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int m = 0; m < kM; ++m) {
CUTLASS_PRAGMA_NO_UNROLL
for (int n = 0; n < kN; ++n) {
C[m][n] = C_gmem[m * kN + n];
}
}
}
__syncthreads();
//
// Instantiate a warp-level matrix multiply operator given the fundamental instruction shape (8x8x4),
// overall shape, data type of each operand, and layout of each operand.
//
using GemmTensorOp = cutlass::gemm::warp::GemmTensorOp<
cutlass::gemm::GemmShape<kM, kN, kK>,
cutlass::gemm::GemmShape<8, 8, 4>,
double, // Data type of A elements
cutlass::layout::RowMajor, // Layout of A matrix
double, // Data type of B elements
cutlass::layout::ColumnMajor, // Layout of B matrix
double, // Data type of C elements
cutlass::layout::RowMajor, // Layout of C matrix
double // Scalar type of alpha and beta
>;
// Instantiate the GEMM operator
GemmTensorOp gemm;
// Execute the warp-level GEMM operation
gemm(
alpha,
{&A[0][0], kK},
{&B[0][0], kK},
beta,
{&C[0][0], kN},
{&C[0][0], kN},
threadIdx.x);
__syncthreads();
  // Copy the result from SMEM back to global memory
if (threadIdx.x == 0) {
CUTLASS_PRAGMA_NO_UNROLL
for (int m = 0; m < kM; ++m) {
CUTLASS_PRAGMA_NO_UNROLL
for (int n = 0; n < kN; ++n) {
D_gmem[m * kN + n] = C[m][n];
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to canonical warp-level GEMM operation
int main(int argc, const char *arg[]) {
bool notSupported = false;
// CUTLASS must be compiled with CUDA 11 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "NVIDIA Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "This example requires compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
// Return 0 so tests are considered passing if run on unsupported platforms.
return 0;
}
cutlass::HostTensor<double, cutlass::layout::RowMajor> A({kM, kK});
cutlass::HostTensor<double, cutlass::layout::ColumnMajor> B({kK, kN});
cutlass::HostTensor<double, cutlass::layout::RowMajor> C({kM, kN});
cutlass::HostTensor<double, cutlass::layout::RowMajor> D({kM, kN});
uint64_t seed = 2020;
double max = 8;
double min = -8;
cutlass::reference::host::TensorFillRandomUniform(
A.host_view(),
seed,
max,
min,
0
);
cutlass::reference::host::TensorFillRandomUniform(
B.host_view(),
seed + 17,
max,
min,
0
);
cutlass::reference::host::TensorFillRandomUniform(
C.host_view(),
seed + 31,
max,
min,
0
);
A.sync_device();
B.sync_device();
C.sync_device();
D.sync_device();
dim3 grid(1,1);
dim3 block(32, 1, 1);
double alpha = 2.25;
double beta = 1.24;
kernel<<< grid, block >>>(
D.device_data(),
alpha,
A.device_data(),
B.device_data(),
beta,
C.device_data()
);
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "Failed to synchronize device after kernel launch." << std::endl;
return -1;
}
D.sync_host();
// Compute reference on host
cutlass::HostTensor<double, cutlass::layout::RowMajor> D_ref({kM, kN}, false);
cutlass::reference::host::GemmComplex(
{kM, kN, kK},
alpha,
A.host_ref(),
cutlass::ComplexTransform::kNone,
B.host_ref(),
cutlass::ComplexTransform::kNone,
beta,
C.host_ref(),
D_ref.host_ref(),
double()
);
// Verify reference matches computed
if (!cutlass::reference::host::TensorEquals(
D.host_view(),
D_ref.host_view())) {
std::cerr
<< "A =\n" << A.host_view()
<< "\n\nB = \n" << B.host_view()
<< "\n\nC = " << C.host_view()
<< "\n\nRef =\n" << D_ref.host_view()
<< "\n\nD =\n" << D.host_view() << "\n\n";
std::cerr << "Error - device results mismatch host reference." << std::endl;
return -1;
}
std::cout << "Passed" << std::endl;
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| examples/19_tensorop_canonical/tensorop_canonical.cu/0 | {
"file_path": "examples/19_tensorop_canonical/tensorop_canonical.cu",
"repo_id": "examples",
"token_count": 5070
} | 4 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
NVIDIA Ampere architecture starts supporting tfloat32 (see include/cutlass/tfloat32.h)
  data types in tensor cores. One big advantage is that we can load fp32 data and convert it
  implicitly to tf32 inside the GEMM kernel, which means no source change is needed to accelerate
  existing fp32 workloads on the NVIDIA Ampere architecture.
We can use the tf32 mode of tensor core to emulate a fast accurate SGEMM kernel which is accelerated
using Ampere Tensor Cores (see include/cutlass/gemm/warp/mma_tensor_op_fast_f32.h).
The trick is very simple
a x b = (a_big + a_small) x (b_big + b_small) = a_big x b_big + a_big x b_small + a_small x b_big
big = convert_to_tf32(fp32)
small = convert_to_tf32(fp32 - big)
  a_small x b_small is discarded because it is too small to matter.
This example demonstrates usage of this kernel, along with accuracy measurements w.r.t. actual FP32
results (SGEMM using SIMT) and against FP64 results (DGEMM)
To enable this feature, the only change needs to make is to change the default OpMultiplyAdd to
OpMultiplyAddFastF32.
  We now have several different flavors of sgemm in the profiler for Ampere. Here are the differences:
sgemm // CUDA core SIMT kernel. FP32 in, accumulated in FP32, FP32 out.
    s1688gemm        // Use 3xTF32 to emulate FP32. FP32 in, converted to TF32-big and TF32-small internally,
// accumulated in FP32, FP32 out.
s1688tf32gemm // Use 1xTF32. FP32 in, converted to one TF32 internally, accumulated in FP32, FP32 out.
s1688gemm_tf32 // TF32 in, accumulated in FP32, FP32 out.
*/
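//
// Minimal host-side sketch (not part of the original example) of the big/small decomposition
// described above. The OpMultiplyAddFastF32 kernel performs the equivalent split on Tensor Cores
// (see include/cutlass/gemm/warp/mma_tensor_op_fast_f32.h); the helper names below (Tf32Pair,
// split_tf32, emulated_fma) are illustrative only and assume that constructing
// cutlass::tfloat32_t from float rounds the value to TF32 precision.
//
#include "cutlass/tfloat32.h"

struct Tf32Pair {
  cutlass::tfloat32_t big;
  cutlass::tfloat32_t small;
};

// Split an fp32 value into a TF32 "big" part and a TF32 "small" residual
inline Tf32Pair split_tf32(float x) {
  cutlass::tfloat32_t big = cutlass::tfloat32_t(x);
  cutlass::tfloat32_t small = cutlass::tfloat32_t(x - float(big));
  return {big, small};
}

// Emulate one fp32 multiply-accumulate with three TF32 products;
// the a_small x b_small term is dropped, exactly as described above.
inline float emulated_fma(float a, float b, float acc) {
  Tf32Pair pa = split_tf32(a);
  Tf32Pair pb = split_tf32(b);
  acc += float(pa.big) * float(pb.big);
  acc += float(pa.big) * float(pb.small);
  acc += float(pa.small) * float(pb.big);
  return acc;
}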
#include <iostream>
#include <vector>
#include <limits>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_reduce.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/error_metrics.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
int m, n, k;
double l2_norm_3xtf32_vs_fp64;
double l2_norm_1xtf32_vs_fp64;
double l2_norm_fp32_vs_fp64;
// ctor
Result(
int m, int n, int k,
double runtime_ms, double gflops,
double l2_norm_3xtf32_vs_fp64,
double l2_norm_1xtf32_vs_fp64,
double l2_norm_fp32_vs_fp64) :
m(m), n(n), k(k),
runtime_ms(runtime_ms), gflops(gflops),
l2_norm_3xtf32_vs_fp64(l2_norm_3xtf32_vs_fp64),
l2_norm_1xtf32_vs_fp64(l2_norm_1xtf32_vs_fp64),
l2_norm_fp32_vs_fp64(l2_norm_fp32_vs_fp64) {}
Result() {}
//
// Methods
//
static void print_csv_header() {
std::cout << "M,N,K,Runtime(ms),GFLOPS,3xTF32_vs_FP64,1xTF32_vs_FP64,FP32_vs_FP64" << std::endl;
}
void print_csv_row() {
std::cout << m << ","
<< n << ","
<< k << ","
<< runtime_ms << ","
<< gflops << ","
<< l2_norm_3xtf32_vs_fp64 << ","
<< l2_norm_1xtf32_vs_fp64 << ","
<< l2_norm_fp32_vs_fp64 << std::endl;
}
};
std::vector<Result> results;
///////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
float alpha;
float beta;
std::string rand_mode;
int iterations;
int seed;
bool benchmark;
Options():
help(false),
problem_size({3456, 4096, 4096}),
iterations(20),
seed(1),
alpha(1),
beta(),
rand_mode("uniform"),
benchmark(false) { }
bool valid() {
//
// CUTLASS attempts to load 128b vectors of F32 elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 4 elements.
//
int const kAlignment = 4;
if ((problem_size.m() % kAlignment) ||
(problem_size.n() % kAlignment) ||
(problem_size.k() % kAlignment)) {
// misaligned tensors
return false;
}
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("seed", seed);
cmd.get_cmd_line_argument("rand_mode", rand_mode);
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "27_ampere_3xtf32_fast_accurate_tensorop_gemm example\n\n"
<< " This example uses the CUTLASS Library to emulate FP32 with TF32 tensorop GEMM computations.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m=<int> GEMM M dimension\n"
<< " --n=<int> GEMM N dimension\n"
<< " --k=<int> GEMM K dimension\n"
<< " --alpha=<f32> Epilogue scalar alpha\n"
<< " --beta=<f32> Epilogue scalar beta\n\n"
<< " --rand_mode=<string> gauss / uniform*\n\n"
<< " --seed=<int> Random number seed (1*)\n\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/27_ampere_3xtf32_fast_accurate_tensorop_gemm/27_ampere_3xtf32_fast_accurate_tensorop_gemm --m=1024 --n=512 \\\n"
<< " --alpha=2 --beta=0.707 \n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = problem_size.product();
// Two flops per multiply-add
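    // (e.g. the default 3456x4096x4096 problem performs 2 * 3456 * 4096 * 4096 ~= 1.16e11 flops)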
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// The code section below describes the matrix layout of input and output matrices. Row Major for
// Matrix A, Column Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
    cutlass::gemm::GemmShape<128, 64, 16>;  // <- threadblock tile M = 128, N = 64, K = 16
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 32, 16>;  // <- warp tile M = 64, N = 32, K = 16
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
float, // <- data type of output matrix
    128 / cutlass::sizeof_bits<float>::value,     // <- the number of elements per vectorized
                                                  // memory access. For float this is
                                                  // 128 / 32 = 4 elements. This becomes the
                                                  // vector width of math instructions in
                                                  // the epilogue too
float, // <- data type of accumulator
float>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 3;
// Alignment
constexpr int Alignment = 4;
//
// Gemm Operators (Gemm_3xTF32, Gemm_1xTF32, GEMM_F32, GEMM_F64)
//
// Gemm_3xTF32
using Gemm_3xTF32 = cutlass::gemm::device::Gemm<
float,
LayoutInputA,
float,
LayoutInputB,
float,
LayoutOutput,
float,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
Alignment,
Alignment,
false,
cutlass::arch::OpMultiplyAddFastF32>;
// Gemm_1xTF32
using Gemm_1xTF32 = cutlass::gemm::device::Gemm<
float,
LayoutInputA,
float,
LayoutInputB,
float,
LayoutOutput,
float,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
Alignment,
Alignment,
false,
cutlass::arch::OpMultiplyAdd>;
// Gemm_F64
using Gemm_F64 = cutlass::reference::device::Gemm<
double,
LayoutInputA,
double,
LayoutInputB,
double,
LayoutOutput,
double,
double>;
// Gemm_F32
using Gemm_F32 = cutlass::reference::device::Gemm<
float,
LayoutInputA,
float,
LayoutInputB,
float,
LayoutOutput,
float,
float>;
bool run(Options &options) {
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size = options.problem_size;
////////////////////////////////////////////////////////////////////////////////
/// 1. Initialize F32 Precision input tensors using CUTLASS helper functions
////////////////////////////////////////////////////////////////////////////////
cutlass::HostTensor<float, LayoutInputA> tensor_a_F32(problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<float, LayoutInputB> tensor_b_F32(problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<float, LayoutOutput> tensor_c_F32(problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<float, LayoutOutput> tensor_d_F32(problem_size.mn()); // <- Create matrix D with dimensions M x N
if (options.rand_mode == "uniform") {
const float min = -1;
const float max = 1;
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a_F32.host_view(),
options.seed,
double(max),
double(min)); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b_F32.host_view(),
options.seed,
double(max),
double(min)); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c_F32.host_view(),
options.seed,
double(max),
double(min)); // <- Fill matrix C on host with uniform-distribution random data
} else if (options.rand_mode == "gauss") {
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomGaussian(
tensor_a_F32.host_view(),
options.seed,
double(0),
double(5)); // <- Fill matrix A on host with gaussian-distribution random data
cutlass::reference::host::TensorFillRandomGaussian(
tensor_b_F32.host_view(),
options.seed,
double(0),
double(5)); // <- Fill matrix B on host with gaussian-distribution random data
cutlass::reference::host::TensorFillRandomGaussian(
tensor_c_F32.host_view(),
options.seed,
double(0),
double(5)); // <- Fill matrix C on host with gaussian-distribution random data
}
cutlass::reference::host::TensorFill(
tensor_d_F32.host_view()); // <- fill matrix D on host with zeros
// Copy data from host to GPU
tensor_a_F32.sync_device();
tensor_b_F32.sync_device();
tensor_c_F32.sync_device();
tensor_d_F32.sync_device();
////////////////////////////////////////////////////////////////////////////////
/// 2. Initialize F64 tensors using the same values used for F32
////////////////////////////////////////////////////////////////////////////////
// Gemm input operands (A, B, C)
cutlass::HostTensor<double, LayoutInputA> tensor_a_F64(problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<double, LayoutInputB> tensor_b_F64(problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<double, LayoutOutput> tensor_c_F64(problem_size.mn()); // <- Create matrix C with dimensions M x N
// Gemm output (D) for GEMM_F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_F64(problem_size.mn()); // <- Create matrix D with dimensions M x N
// Gemm output (D) for GEMM_3xTF32
cutlass::HostTensor<float, LayoutOutput> tensor_d_3xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N
// Gemm output (D) for GEMM_1xTF32
cutlass::HostTensor<float, LayoutOutput> tensor_d_1xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N
  // Copy the F32 input values into the F64 tensors and initialize the output tensors
cutlass::reference::host::TensorCopy(tensor_a_F64.host_view(), tensor_a_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_b_F64.host_view(), tensor_b_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_c_F64.host_view(), tensor_c_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_F64.host_view(), tensor_d_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_3xTF32.host_view(), tensor_d_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_1xTF32.host_view(), tensor_d_F32.host_view());
// Copy data from host to GPU
tensor_a_F64.sync_device();
tensor_b_F64.sync_device();
tensor_c_F64.sync_device();
tensor_d_F64.sync_device();
tensor_d_3xTF32.sync_device();
tensor_d_1xTF32.sync_device();
// Initialize alpha and beta for dot product computation
float alpha = float(options.alpha);
float beta = float(options.beta);
// Split K dimension into 1 partitions
int split_k_slices = 1;
////////////////////////////////////////////////////////////////////////////////
/// 3. Run 3xTF32 kernel within a profiling loop
////////////////////////////////////////////////////////////////////////////////
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm_3xTF32::Arguments arguments_3xtf32{problem_size, // <- problem size of matrix multiplication
tensor_a_F32.device_ref(), // <- reference to matrix A on device
tensor_b_F32.device_ref(), // <- reference to matrix B on device
tensor_c_F32.device_ref(), // <- reference to matrix C on device
tensor_d_3xTF32.device_ref(), // <- reference to matrix D on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size_3xtf32 = Gemm_3xTF32::get_workspace_size(arguments_3xtf32);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace_3xtf32(workspace_size_3xtf32);
// Instantiate CUTLASS kernel depending on templates
Gemm_3xTF32 gemm_op_3xTF32;
// Check the problem size is supported or not
cutlass::Status status_3xtf32 = gemm_op_3xTF32.can_implement(arguments_3xtf32);
CUTLASS_CHECK(status_3xtf32);
// Initialize CUTLASS kernel with arguments and workspace pointer
status_3xtf32 = gemm_op_3xTF32.initialize(arguments_3xtf32, workspace_3xtf32.get());
CUTLASS_CHECK(status_3xtf32);
// Result structure
Result result;
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return false;
}
}
// Record an event at the start of a series of GEMMs
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return false;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
// Launch initialized CUTLASS kernel
status_3xtf32 = gemm_op_3xTF32();
CUTLASS_CHECK(status_3xtf32);
}
//
// Stop profiling loop
//
// Record an event when the GEMMs are complete
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return false;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return false;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return false;
}
// Compute average runtime and GFLOPs.
result.m = problem_size.m();
result.n = problem_size.n();
result.k = problem_size.k();
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
tensor_d_3xTF32.sync_host();
////////////////////////////////////////////////////////////////////////////////
  /// 4. Run 1xTF32 kernel without profiling loop
////////////////////////////////////////////////////////////////////////////////
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm_1xTF32::Arguments arguments_1xtf32{problem_size, // <- problem size of matrix multiplication
tensor_a_F32.device_ref(), // <- reference to matrix A on device
tensor_b_F32.device_ref(), // <- reference to matrix B on device
tensor_c_F32.device_ref(), // <- reference to matrix C on device
tensor_d_1xTF32.device_ref(), // <- reference to matrix D on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size_1xtf32 = Gemm_1xTF32::get_workspace_size(arguments_1xtf32);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace_1xtf32(workspace_size_1xtf32);
// Instantiate CUTLASS kernel depending on templates
Gemm_1xTF32 gemm_op_1xtf32;
// Check the problem size is supported or not
cutlass::Status status_1xtf32 = gemm_op_1xtf32.can_implement(arguments_1xtf32);
CUTLASS_CHECK(status_1xtf32);
// Initialize CUTLASS kernel with arguments and workspace pointer
status_1xtf32 = gemm_op_1xtf32.initialize(arguments_1xtf32, workspace_1xtf32.get());
CUTLASS_CHECK(status_1xtf32);
// Launch initialized CUTLASS kernel
status_1xtf32 = gemm_op_1xtf32();
CUTLASS_CHECK(status_1xtf32);
tensor_d_1xTF32.sync_host();
////////////////////////////////////////////////////////////////////////////////
// Run reference kernel (F64)
////////////////////////////////////////////////////////////////////////////////
// Create instantiation for device reference gemm kernel
Gemm_F64 gemm_f64;
// Launch device reference gemm kernel
gemm_f64(problem_size,
alpha,
tensor_a_F64.device_ref(),
tensor_b_F64.device_ref(),
beta,
tensor_c_F64.device_ref(),
tensor_d_F64.device_ref());
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d_F64.sync_host();
////////////////////////////////////////////////////////////////////////////////
// Run reference kernel (F32)
////////////////////////////////////////////////////////////////////////////////
// Create instantiation for device reference gemm kernel
Gemm_F32 gemm_f32;
// Launch device reference gemm kernel
gemm_f32(problem_size,
alpha,
tensor_a_F32.device_ref(),
tensor_b_F32.device_ref(),
beta,
tensor_c_F32.device_ref(),
tensor_d_F32.device_ref());
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d_F32.sync_host();
////////////////////////////////////////////////////////////////////////////////
/////// Compute l2 norms
////////////////////////////////////////////////////////////////////////////////
// l2 norm 3xTF32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_3xTF32_in_F64(problem_size.mn());
cutlass::reference::host::TensorCopy(tensor_d_3xTF32_in_F64.host_view(), tensor_d_3xTF32.host_view());
result.l2_norm_3xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_3xTF32_in_F64.host_view(), tensor_d_F64.host_view());
// l2 norm 1xTF32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_1xTF32_in_F64(problem_size.mn());
cutlass::reference::host::TensorCopy(tensor_d_1xTF32_in_F64.host_view(), tensor_d_1xTF32.host_view());
result.l2_norm_1xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_1xTF32_in_F64.host_view(), tensor_d_F64.host_view());
// l2 norm F32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_F32_in_F64(problem_size.mn());
cutlass::reference::host::TensorCopy(tensor_d_F32_in_F64.host_view(), tensor_d_F32.host_view());
result.l2_norm_fp32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_F32_in_F64.host_view(), tensor_d_F64.host_view());
results.push_back(result);
///////////////////////////////////////////////////////////////////////////////
// Check if output from CUTLASS kernel and reference kernel are equal or not
std::cout << std::fixed;
std::cout.precision(4);
std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout.precision(2);
std::cout << "GFLOPs: " << result.gflops << std::endl;
std::cout << "Normalized L2 norm of" << std::endl;
std::cout.precision(8);
std::cout << std::scientific
<< " - 3xTF32 error with FP64 reference : " << result.l2_norm_3xtf32_vs_fp64 << std::endl
<< " - 1xTF32 error with FP64 reference : " << result.l2_norm_1xtf32_vs_fp64 << std::endl
<< " - FP32 error with FP64 reference : " << result.l2_norm_fp32_vs_fp64 << std::endl;
return true;
}
int main(int argc, const char **argv) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
    // Returning zero so this test passes on older Toolkits. Its actions are a no-op.
return 0;
}
Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
bool result = true;
if (options.benchmark) {
for (int k = 4; k <= 65536; k *= 2) {
options.problem_size[2] = k;
printf("Gemm problem size: %d x %d x %d\n", \
options.problem_size.m(), options.problem_size.n(), options.problem_size.k());
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
result &= run(options);
}
} else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
result = run(options);
}
if (!result) return -1;
std::cout << std::endl << "CSV results" << std::endl;
Result::print_csv_header();
for(auto &r : results)
r.print_csv_row();
return 0;
}
| examples/27_ampere_3xtf32_fast_accurate_tensorop_gemm/27_ampere_3xtf32_fast_accurate_tensorop_gemm.cu/0 | {
"file_path": "examples/27_ampere_3xtf32_fast_accurate_tensorop_gemm/27_ampere_3xtf32_fast_accurate_tensorop_gemm.cu",
"repo_id": "examples",
"token_count": 12989
} | 5 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Grouped FMHA kernel
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/trace.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
#include "fmha_grouped_problem_visitor.h"
#include "gemm_kernel_utils.h"
#include "gemm/mma_accum_lambda_iterator.h"
#include "epilogue/epilogue_rescale_output.h"
namespace {
static CUTLASS_DEVICE float atomicMaxFloat(float* addr, float value) {
// source: https://stackoverflow.com/a/51549250
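  // For non-negative floats the IEEE-754 bit pattern compares like a signed int, so atomicMax
  // on the int bits implements a float max. For negative floats the unsigned bit pattern is
  // ordered in reverse, so atomicMin on the unsigned bits is used instead.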
return (value >= 0)
? __int_as_float(atomicMax((int*)addr, __float_as_int(value)))
: __uint_as_float(atomicMin((unsigned int*)addr, __float_as_uint(value)));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename MM0_, ///! Structure for computing P = Q @ K
typename MM1_, ///! Structure for computing O = P @ V
typename scalar_t_,
typename accum_t_,
typename output_t_,
typename output_accum_t_,
bool kKeepOutputInRF, ///! Whether the intermediate output from MM0_ should be kept in the register file
GroupScheduleMode GroupScheduleMode_ ///! Type of scheduling to perform
>
struct FMHAGrouped {
public:
using MM0 = MM0_;
using MM1 = MM1_;
using scalar_t = scalar_t_;
using accum_t = accum_t_;
using output_t = output_t_;
using output_accum_t = output_accum_t_;
static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_;
static constexpr bool kNeedsOutputAccumulatorBuffer = !kKeepOutputInRF &&
!cutlass::platform::is_same<output_accum_t, output_t>::value;
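  // i.e. a separate global-memory accumulator buffer is required only when the output is not
  // kept in the register file and the accumulation type differs from the output type.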
// Parameters to satisfy BaseGrouped
using ElementA = scalar_t;
using ElementB = scalar_t;
using ElementC = accum_t;
using LayoutA = typename MM0::LayoutA;
  using LayoutB = typename MM0::LayoutB;
  using LayoutC = typename MM1::LayoutC;
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
static int const kAlignmentA = MM0::kAlignmentA;
static int const kAlignmentB = MM0::kAlignmentB;
static int const kAlignmentC = 1;
using Mma = typename MM1::Mma;
using EpilogueOutputOp = typename MM1::EpilogueOutputOp;
using ThreadblockSwizzle = void;
using Operator = typename MM1::Operator;
using WarpShape = typename MM1::WarpShape;
using InstructionShape = typename MM1::InstructionShape;
using ElementQ = scalar_t;
using ElementK = scalar_t;
using ElementP = accum_t;
using ElementV = scalar_t;
using ElementO = output_t;
using ElementOAccum = output_accum_t;
using ElementAccumulator = accum_t;
using LayoutQ = typename MM0::LayoutA;
using LayoutK = typename MM0::LayoutB;
using LayoutP = typename MM0::LayoutC;
using LayoutV = typename MM1::LayoutB;
using LayoutO = typename MM1::LayoutC;
static bool const kPreloadV = (MM1::Mma::ArchTag::kMinComputeCapability >= 80 &&
cutlass::sizeof_bits<ElementV>::value == 16);
static int const kAlignmentQ = MM0::kAlignmentA;
static int const kAlignmentK = MM0::kAlignmentB;
static int const kAlignmentV = 1;
using ThreadblockShape = typename MM0::ThreadblockShape;
static int const kQueriesPerBlock = ThreadblockShape::kM;
static int const kKeysPerBlock = ThreadblockShape::kN;
static constexpr bool kSupportsDropout = false;
static constexpr bool kSupportsBias = false;
/// Warp count (concept: GemmShape)
using WarpCount = typename MM1::WarpCount;
static int const kThreadsPerWarp = 32;
static int const kThreadCount = kThreadsPerWarp * WarpCount::kCount;
static constexpr int kNumWarpsPerBlock =
kQueriesPerBlock * kKeysPerBlock / (kThreadsPerWarp * kThreadsPerWarp);
using ProblemVisitor = FMHAGroupedProblemVisitor<
ThreadblockShape,
kGroupScheduleMode,
kThreadCount,
kThreadCount>;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord *problem_sizes0{nullptr};
GemmCoord *problem_sizes1{nullptr};
int problem_count{0};
int threadblock_count{0};
ElementQ ** ptr_Q{nullptr};
ElementK ** ptr_K{nullptr};
ElementP ** ptr_P{nullptr};
ElementV ** ptr_V{nullptr};
ElementO ** ptr_O{nullptr};
ElementOAccum ** ptr_O_accum{nullptr};
typename LayoutQ::Stride::LongIndex *ldq{nullptr};
typename LayoutK::Stride::LongIndex *ldk{nullptr};
typename LayoutV::Stride::LongIndex *ldv{nullptr};
typename LayoutO::Stride::LongIndex *ldo{nullptr};
// Whether causal masking is to be performed
bool causal{false};
// Scale
ElementAccumulator scale{0};
// Only used by device-level operator
GemmCoord *host_problem_sizes{nullptr};
//
// Methods
//
/// Default ctor
Arguments() = default;
/// Ctor
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord *problem_sizes0,
GemmCoord *problem_sizes1,
int problem_count,
int threadblock_count,
ElementQ ** ptr_Q,
ElementK ** ptr_K,
ElementP ** ptr_P,
ElementV ** ptr_V,
ElementO ** ptr_O,
ElementOAccum ** ptr_O_accum,
typename LayoutQ::Stride::LongIndex *ldq,
typename LayoutK::Stride::LongIndex *ldk,
typename LayoutP::Stride::LongIndex *ldp,
typename LayoutV::Stride::LongIndex *ldv,
typename LayoutO::Stride::LongIndex *ldo,
bool causal,
ElementAccumulator scale,
GemmCoord *host_problem_sizes=nullptr
):
problem_sizes0(problem_sizes0),
problem_sizes1(problem_sizes1),
problem_count(problem_count),
threadblock_count(threadblock_count),
ptr_Q(ptr_Q),
ptr_K(ptr_K),
ptr_P(ptr_P),
ptr_V(ptr_V),
ptr_O(ptr_O),
ptr_O_accum(kNeedsOutputAccumulatorBuffer ? ptr_O_accum : (accum_t**)ptr_O),
ldq(ldq),
ldk(ldk),
ldv(ldv),
ldo(ldo),
causal(causal),
scale(scale),
host_problem_sizes(host_problem_sizes)
{
}
bool __host__ check_supported() {
CHECK_ALIGNED_PTR(ptr_Q, kAlignmentQ);
CHECK_ALIGNED_PTR(ptr_K, kAlignmentK);
CHECK_ALIGNED_PTR(ptr_V, kAlignmentV);
XFORMERS_CHECK(ldq % kAlignmentQ == 0, "query is not correctly aligned");
XFORMERS_CHECK(ldk % kAlignmentK == 0, "key is not correctly aligned");
XFORMERS_CHECK(ldv % kAlignmentV == 0, "value is not correctly aligned");
return true;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
typename ProblemVisitor::Params problem_visitor;
int threadblock_count;
ElementQ ** ptr_Q;
ElementK ** ptr_K;
ElementP ** ptr_P;
ElementV ** ptr_V;
ElementO ** ptr_O;
ElementOAccum ** ptr_O_accum;
typename LayoutQ::Stride::LongIndex *ldq;
typename LayoutK::Stride::LongIndex *ldk;
typename LayoutV::Stride::LongIndex *ldv;
typename LayoutO::Stride::LongIndex *ldo;
ElementAccumulator scale;
bool causal;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
ptr_Q(nullptr),
ptr_K(nullptr),
ptr_P(nullptr),
ptr_V(nullptr),
ptr_O(nullptr),
ptr_O_accum(nullptr),
ldq(nullptr),
ldk(nullptr),
ldv(nullptr),
ldo(nullptr),
causal(false),
scale(0)
{ }
CUTLASS_HOST_DEVICE
Params(Arguments const &args,
void *workspace = nullptr,
int tile_count = 0):
problem_visitor(args.problem_sizes0, args.problem_sizes1, args.problem_count, workspace, tile_count),
threadblock_count(args.threadblock_count),
ptr_Q(args.ptr_Q),
ptr_K(args.ptr_K),
ptr_P(args.ptr_P),
ptr_V(args.ptr_V),
ptr_O(args.ptr_O),
ptr_O_accum(kNeedsOutputAccumulatorBuffer ? args.ptr_O_accum : (accum_t**)args.ptr_O),
ldq(args.ldq),
ldk(args.ldk),
ldv(args.ldv),
ldo(args.ldo),
causal(args.causal),
scale(args.scale)
{
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr,
int tile_count = 0) {
problem_visitor = typename ProblemVisitor::Params(args.problem_sizes0,
args.problem_sizes1,
args.problem_count,
workspace, tile_count);
threadblock_count = args.threadblock_count;
ptr_Q = args.ptr_Q;
ptr_K = args.ptr_K;
ptr_P = args.ptr_P;
ptr_V = args.ptr_V;
ptr_O = args.ptr_O;
ptr_O_accum = kNeedsOutputAccumulatorBuffer ? args.ptr_O_accum : (accum_t**)args.ptr_O;
ldq = args.ldq;
ldk = args.ldk;
ldv = args.ldv;
ldo = args.ldo;
causal = args.causal;
scale = args.scale;
}
};
// Shared storage - depends on kernel params
struct ScalingCoefs {
cutlass::Array<ElementAccumulator, kQueriesPerBlock> m_prime;
cutlass::Array<ElementAccumulator, kQueriesPerBlock> s_prime;
cutlass::Array<ElementAccumulator, kQueriesPerBlock> mi;
cutlass::Array<ElementAccumulator, kQueriesPerBlock> out_rescale;
cutlass::Array<ElementAccumulator, kQueriesPerBlock * MM0::MmaCore::WarpCount::kN>
addition_storage;
};
struct SharedStorageEpilogueAtEnd : ScalingCoefs {
struct SharedStorageAfterMM0 {
// Everything here might be overwritten during MM0
typename MM0::AccumulatorSharedStorage si;
typename MM1::Mma::SharedStorage mm1;
};
union {
typename MM0::Mma::SharedStorage mm0;
SharedStorageAfterMM0 after_mm0;
typename MM1::DefaultEpilogue::SharedStorage epilogue;
};
CUTLASS_DEVICE typename MM1::DefaultEpilogue::SharedStorage&
epilogue_shared_storage() {
return epilogue;
}
// ProblemVisitor shared storage can't be overlapped with others
typename ProblemVisitor::SharedStorage problem_visitor;
};
struct SharedStorageEpilogueInLoop : ScalingCoefs {
struct SharedStorageAfterMM0 {
// Everything here might be overwritten during MM0
typename MM0::AccumulatorSharedStorage si;
typename MM1::Mma::SharedStorage mm1;
typename MM1::DefaultEpilogue::SharedStorage epilogue;
};
union {
typename MM0::Mma::SharedStorage mm0;
SharedStorageAfterMM0 after_mm0;
};
CUTLASS_DEVICE typename MM1::DefaultEpilogue::SharedStorage&
epilogue_shared_storage() {
return after_mm0.epilogue;
}
// ProblemVisitor shared storage can't be overlapped with others
typename ProblemVisitor::SharedStorage problem_visitor;
};
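// When the output is kept in the register file the epilogue runs once, after the
// key loop, so its shared storage can alias the MM0/MM1 staging buffers; otherwise
// the epilogue runs inside the key loop and must coexist with the MM1 operand
// staging, hence the two layouts above.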
using SharedStorage = typename cutlass::platform::conditional<
kKeepOutputInRF,
SharedStorageEpilogueAtEnd,
SharedStorageEpilogueInLoop>::type;
private:
// Parameters to be used by an individual tile
struct TileParams {
CUTLASS_HOST_DEVICE
static int query_start(int threadblock_idx) {
return threadblock_idx * kQueriesPerBlock;
}
// Returns whether this threadblock computes within the number of queries,
// which is determined by the M dimension of problem 0
CUTLASS_HOST_DEVICE
static bool can_compute(int threadblock_idx, const GemmCoord& problem_size0) {
return query_start(threadblock_idx) < problem_size0.m();
}
CUTLASS_HOST_DEVICE
static int num_queries(int threadblock_idx, const GemmCoord& problem_size0) {
return problem_size0.m() - query_start(threadblock_idx);
}
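// With causal masking, queries in this block can only attend to keys up to
// (query_start + kQueriesPerBlock), so the key loop for this tile can stop early.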
CUTLASS_HOST_DEVICE
static int num_keys(int threadblock_idx, const GemmCoord& problem_size0, bool causal) {
int nk = problem_size0.n();
if (causal) {
nk = cutlass::fast_min(int32_t(query_start(threadblock_idx) + kQueriesPerBlock), nk);
}
return nk;
}
};
public:
//
// Methods
//
CUTLASS_DEVICE
FMHAGrouped() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) {
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return Status::kSuccess;
}
static CUTLASS_DEVICE int16_t thread_id() {
return threadIdx.x;
}
static CUTLASS_DEVICE int8_t warp_id() {
return threadIdx.x / kThreadsPerWarp;
}
static CUTLASS_DEVICE int8_t lane_id() {
return threadIdx.x % kThreadsPerWarp;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
auto& m_prime = shared_storage.m_prime;
auto& s_prime = shared_storage.s_prime;
[[maybe_unused]] auto& si = shared_storage.after_mm0.si;
auto& mi = shared_storage.mi;
auto& out_rescale = shared_storage.out_rescale;
ProblemVisitor problem_visitor(
params.problem_visitor,
shared_storage.problem_visitor,
blockIdx.x);
// Outer 'persistent' loop to iterate over tiles
while (problem_visitor.next_tile()) {
GemmCoord problem_size0 = problem_visitor.problem_size0();
GemmCoord problem_size1 = problem_visitor.problem_size1();
const int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx());
if (!TileParams::can_compute(threadblock_idx, problem_size0)) {
problem_visitor.advance(gridDim.x);
continue;
}
const int32_t problem_idx = problem_visitor.problem_index();
if (thread_id() < kQueriesPerBlock) {
s_prime[thread_id()] = ElementAccumulator(0);
out_rescale[thread_id()] = accum_t(1.0);
m_prime[thread_id()] =
-cutlass::platform::numeric_limits<ElementAccumulator>::infinity();
mi[thread_id()] = -cutlass::platform::numeric_limits<ElementAccumulator>::infinity();
}
ElementO *ptr_O = params.ptr_O[problem_idx] + TileParams::query_start(threadblock_idx) * params.ldo[problem_idx];
ElementOAccum *ptr_O_accum = params.ptr_O_accum[problem_idx] + TileParams::query_start(threadblock_idx) * params.ldo[problem_idx];
const int num_queries = TileParams::num_queries(threadblock_idx, problem_size0);
auto createOutputIter = [&](int col) -> typename MM1::OutputTileIterator {
using OutputTileIterator = typename MM1::OutputTileIterator;
return OutputTileIterator(
typename OutputTileIterator::Params{(int32_t)params.ldo[problem_idx]},
ptr_O,
typename OutputTileIterator::TensorCoord{
num_queries, problem_size1.n()},
thread_id(),
{0, col});
};
auto createOutputAccumIter = [&](int col) ->
typename MM1::OutputTileIteratorAccum {
using OutputTileIteratorAccum = typename MM1::OutputTileIteratorAccum;
return OutputTileIteratorAccum(
typename OutputTileIteratorAccum::Params{(int32_t)params.ldo[problem_idx]},
ptr_O_accum,
typename OutputTileIteratorAccum::TensorCoord{
num_queries, problem_size1.n()},
thread_id(),
{0, col});
};
typename MM1::Mma::FragmentC accum_o;
accum_o.clear();
const int num_keys = TileParams::num_keys(threadblock_idx, problem_size0, params.causal);
for (int32_t iter_key_start = 0; iter_key_start < num_keys;
iter_key_start += kKeysPerBlock) {
int32_t problem_size_0_m =
cutlass::fast_min((int32_t)kQueriesPerBlock, num_queries);
int32_t problem_size_0_n = cutlass::fast_min(
(int32_t)kKeysPerBlock, num_keys - iter_key_start);
int32_t const& problem_size_0_k = problem_size0.k();
int32_t const& problem_size_1_n = problem_size1.n();
int32_t const& problem_size_1_k = problem_size_0_n;
auto prologueV = [&](int blockN) {
typename MM1::Mma::IteratorB iterator_V(
typename MM1::IteratorB::Params{MM1::LayoutB(params.ldv[problem_idx])},
params.ptr_V[problem_idx] + iter_key_start * params.ldv[problem_idx],
{problem_size_1_k, problem_size_1_n},
thread_id(),
cutlass::MatrixCoord{0, blockN * MM1::Mma::Shape::kN});
MM1::Mma::prologue(
shared_storage.after_mm0.mm1,
iterator_V,
thread_id(),
problem_size_1_k);
};
__syncthreads(); // Need to have shared memory initialized, and `m_prime`
// updated from end of prev iter
//
// MATMUL: Q.K_t
//
// Computes the block-matrix product of:
// (a) query[query_start:query_end, :]
// with
// (b) key[iter_key_start:iter_key_start + kKeysPerBlock]
// and stores that into `shared_storage.si`
//
ElementQ *ptr_Q = params.ptr_Q[problem_idx] + TileParams::query_start(threadblock_idx) * params.ldq[problem_idx];
// Construct iterators to A and B operands
typename MM0::IteratorA iterator_A(
typename MM0::IteratorA::Params(
typename MM0::MmaCore::LayoutA(params.ldq[problem_idx])),
ptr_Q,
{problem_size_0_m, problem_size_0_k},
thread_id(),
{0, 0});
typename MM0::IteratorB iterator_B(
typename MM0::IteratorB::Params(
typename MM0::MmaCore::LayoutB(params.ldk[problem_idx])),
params.ptr_K[problem_idx] + iter_key_start * params.ldk[problem_idx],
{problem_size_0_k, problem_size_0_n},
thread_id(),
{0, 0});
// Construct thread-scoped matrix multiply
typename MM0::Mma mma(
shared_storage.mm0, thread_id(), warp_id(), lane_id());
typename MM0::Mma::FragmentC accum;
accum.clear();
auto gemm_k_iterations =
(problem_size_0_k + MM0::Mma::Shape::kK - 1) / MM0::Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);
__syncthreads();
if (kPreloadV) {
prologueV(0);
} else {
MM1::Mma::drain_cp_asyncs();
}
typename MM0::Mma::Operator::IteratorC::TensorCoord
iteratorC_tile_offset = {
(warp_id() % MM0::Mma::WarpCount::kM),
(warp_id() / MM0::Mma::WarpCount::kM)
};
// Mask out last if causal
if (params.causal && num_keys - iter_key_start <= kKeysPerBlock) {
auto lane_offset = MM0::AccumLambdaIterator::get_lane_offset(
lane_id(), warp_id(), iteratorC_tile_offset);
int32_t last_col;
MM0::AccumLambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) {
last_col = TileParams::query_start(threadblock_idx) + accum_m - iter_key_start;
},
[&](int accum_m, int accum_n, int idx) {
if (accum_n > last_col) {
accum[idx] =
-cutlass::platform::numeric_limits<accum_t>::infinity();
}
},
[&](int accum_m) {});
}
// DISPATCH_BOOL(iter_key_start == 0, kIsFirst, ([&] {
// DISPATCH_BOOL(
// num_keys - iter_key_start >= kKeysPerBlock,
// kFullColumns,
// ([&] {
// // Update `mi` from accum stored in registers
// // Also does accum[i] <- exp(accum[i] - mi)
// iterative_softmax<
// typename MM0::Mma::Operator::IteratorC,
// kFullColumns,
// kIsFirst>(
// accum_o,
// accum,
// mi,
// m_prime,
// s_prime,
// lane_id(),
// thread_id(),
// warp_id(),
// num_keys - iter_key_start,
// iteratorC_tile_offset,
// kSupportsBias ? 1.0f : params.scale);
// }));
// }));
// Update `mi` from accum stored in registers
// Also does accum[i] <- exp(accum[i] - mi)
iterative_softmax<typename MM0::Mma::Operator::IteratorC>(
accum_o,
accum,
mi,
m_prime,
s_prime,
out_rescale,
shared_storage.addition_storage,
lane_id(),
thread_id(),
warp_id(),
num_keys - iter_key_start,
iter_key_start == 0,
iteratorC_tile_offset,
kSupportsBias ? 1.0f : params.scale);
// Output results to shared-memory
int warp_idx_mn_0 = warp_id() %
(MM0::Mma::Base::WarpCount::kM * MM0::Mma::Base::WarpCount::kN);
auto output_tile_coords = cutlass::MatrixCoord{
warp_idx_mn_0 % MM0::Mma::Base::WarpCount::kM,
warp_idx_mn_0 / MM0::Mma::Base::WarpCount::kM};
MM0::B2bGemm::accumToSmem(
shared_storage.after_mm0.si, accum, lane_id(), output_tile_coords);
__syncthreads();
//
// MATMUL: Attn . V
// Run the matmul `attn @ V` for a block of attn and V.
// `attn` is read from shared memory (in `shared_storage.after_mm0.si`)
// `V` is read from global memory (with iterator_B)
//
const int64_t nBlockN = kKeepOutputInRF ? 1
: ceil_div(
(int64_t)problem_size_1_n,
int64_t(MM1::ThreadblockShape::kN));
// Iterate over the N dimension of GEMM1
for (int blockN = 0; blockN < nBlockN; ++blockN) {
int gemm_k_iterations =
(problem_size_1_k + MM1::Mma::Shape::kK - 1) / MM1::Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add and store it in accum
// (in registers)
if (!kPreloadV) {
__syncthreads(); // we share shmem between mma and epilogue
}
typename MM1::Mma::IteratorB iterator_V(
typename MM1::IteratorB::Params{MM1::LayoutB(params.ldv[problem_idx])},
params.ptr_V[problem_idx] + iter_key_start * params.ldv[problem_idx],
{problem_size_1_k, problem_size_1_n},
thread_id(),
cutlass::MatrixCoord{0, blockN * MM1::Mma::Shape::kN});
typename MM1::Mma mma_pv(
// operand A: Pij_dropped in shared memory
shared_storage.after_mm0.si.accum_ref(),
// operand B: shared memory staging area for Vj, which is loaded
// from global memory
shared_storage.after_mm0.mm1.operand_B_ref(),
(int)thread_id(),
(int)warp_id(),
(int)lane_id());
mma_pv.set_prologue_done(kPreloadV);
if (!kKeepOutputInRF) {
accum_o.clear();
}
mma_pv(gemm_k_iterations, accum_o, iterator_V, accum_o);
__syncthreads();
if (kPreloadV && !kKeepOutputInRF && blockN + 1 < nBlockN) {
prologueV(blockN + 1);
}
if (!kKeepOutputInRF) {
MM1::Mma::drain_cp_asyncs();
DISPATCH_BOOL(
iter_key_start == 0, kIsFirst, ([&] {
DISPATCH_BOOL(
(iter_key_start + kKeysPerBlock) >= num_keys,
kIsLast,
([&] {
using DefaultEpilogue = typename MM1::DefaultEpilogue;
using DefaultOp = typename MM1::DefaultConfig::EpilogueOutputOp;
using ElementCompute = typename DefaultOp::ElementCompute;
using EpilogueOutputOp = typename cutlass::epilogue::
thread::MemoryEfficientAttentionNormalize<
typename cutlass::platform::conditional<
kIsLast,
output_t,
output_accum_t>::type,
output_accum_t,
DefaultOp::kCount,
typename DefaultOp::ElementAccumulator,
output_accum_t,
kIsFirst,
kIsLast,
cutlass::Array<ElementCompute, kQueriesPerBlock>>;
using Epilogue = typename cutlass::epilogue::threadblock::
EpiloguePipelined<
typename DefaultEpilogue::Shape,
typename MM1::Mma::Operator,
DefaultEpilogue::kPartitionsK,
typename cutlass::platform::conditional<
kIsLast,
typename MM1::OutputTileIterator,
typename MM1::OutputTileIteratorAccum>::type,
typename DefaultEpilogue::
AccumulatorFragmentIterator,
typename DefaultEpilogue::WarpTileIterator,
typename DefaultEpilogue::SharedLoadIterator,
EpilogueOutputOp,
typename DefaultEpilogue::Padding,
DefaultEpilogue::kFragmentsPerIteration,
true, // IterationsUnroll
typename MM1::OutputTileIteratorAccum // Read
// iterator
>;
int col = blockN * MM1::Mma::Shape::kN;
auto source_iter = createOutputAccumIter(col);
auto dest_iter = gemm_kernel_utils::call_conditional<
kIsLast,
decltype(createOutputIter),
decltype(createOutputAccumIter)>::
apply(createOutputIter, createOutputAccumIter, col);
EpilogueOutputOp rescale(s_prime, out_rescale);
Epilogue epilogue(
shared_storage.epilogue_shared_storage(),
thread_id(),
warp_id(),
lane_id());
epilogue(rescale, dest_iter, accum_o, source_iter);
}));
}));
if (!kKeepOutputInRF) {
__syncthreads();
}
}
}
__syncthreads(); // we modify `m_prime` after
}
if (kKeepOutputInRF) {
const bool kIsFirst = true;
const bool kIsLast = true;
using DefaultEpilogue = typename MM1::DefaultEpilogue;
using DefaultOp = typename MM1::DefaultConfig::EpilogueOutputOp;
using ElementCompute = typename DefaultOp::ElementCompute;
using EpilogueOutputOp =
typename cutlass::epilogue::thread::MemoryEfficientAttentionNormalize<
output_t, // output
output_accum_t, // source
DefaultOp::kCount,
typename DefaultOp::ElementAccumulator, // accum
output_accum_t, // compute
kIsFirst,
kIsLast,
cutlass::Array<ElementCompute, kQueriesPerBlock>>;
using Epilogue =
typename cutlass::epilogue::threadblock::EpiloguePipelined<
typename DefaultEpilogue::Shape,
typename MM1::Mma::Operator,
DefaultEpilogue::kPartitionsK,
typename MM1::OutputTileIterator, // destination
typename DefaultEpilogue::AccumulatorFragmentIterator,
typename DefaultEpilogue::WarpTileIterator,
typename DefaultEpilogue::SharedLoadIterator,
EpilogueOutputOp,
typename DefaultEpilogue::Padding,
DefaultEpilogue::kFragmentsPerIteration,
true, // IterationsUnroll
typename MM1::OutputTileIteratorAccum // source tile
>;
auto dest_iter = createOutputIter(0);
EpilogueOutputOp rescale(s_prime, out_rescale);
Epilogue epilogue(
shared_storage.epilogue_shared_storage(),
thread_id(),
warp_id(),
lane_id());
MM1::Mma::drain_cp_asyncs();
epilogue(rescale, dest_iter, accum_o);
}
// Next tile
problem_visitor.advance(gridDim.x);
__syncthreads(); // Don't start the next iteration until all threads are done using shared memory.
}
}
template <typename WarpIteratorC>
CUTLASS_DEVICE static void iterative_softmax(
typename WarpIteratorC::Fragment& frag_o, // output so far
typename WarpIteratorC::Fragment& frag,
cutlass::Array<accum_t, kQueriesPerBlock>& mi,
cutlass::Array<accum_t, kQueriesPerBlock>& m_prime,
cutlass::Array<accum_t, kQueriesPerBlock>& s_prime,
cutlass::Array<accum_t, kQueriesPerBlock>& out_rescale,
cutlass::Array<accum_t, kQueriesPerBlock * MM0::MmaCore::WarpCount::kN>&
addition_storage,
int8_t lane_id,
int8_t thread_id,
int8_t warp_id,
int max_col,
bool is_first,
typename WarpIteratorC::TensorCoord const& tile_offset,
float scaling) {
/* Iterates over the accumulator and the corresponding positions in the result matrix
(1) Update `mi[r]` to the max value of the row `r`
(2) In a second iteration do the following:
(a) accum <- exp(accum - mi)
(b) m_prime <- exp(m_prime - mi)
(c) s_prime <- s_prime * m_prime + sum(accum)
All of this is done on registers, before we store all of this
on shared memory for the next matmul with Value.
*/
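// Invariant maintained across key blocks (online-softmax / flash-attention style):
// after processing keys [0, k) for a row r of scaled logits s_r,
//   mi[r]      == max_{j<k} s_r[j]                 (running row max)
//   s_prime[r] == sum_{j<k} exp(s_r[j] - mi[r])    (rescaled running row sum)
// so the epilogue only needs to divide the accumulated output by s_prime.
// (The kernel keeps these values in a base-2 exponential domain via kLog2e,
// which is mathematically equivalent.)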
using Fragment = typename WarpIteratorC::Fragment;
using LambdaIterator = typename DefaultMmaAccumLambdaIterator<
WarpIteratorC,
accum_t,
kThreadsPerWarp>::Iterator;
// Convert to `accum_t` (rather than double)
constexpr float kLog2e = 1.4426950408889634074; // log_2(e) = M_LOG2E
static_assert(kQueriesPerBlock % kNumWarpsPerBlock == 0, "");
static constexpr int kLinesPerWarp = kQueriesPerBlock / kNumWarpsPerBlock;
frag = cutlass::multiplies<Fragment>()(scaling * kLog2e, frag);
auto lane_offset =
LambdaIterator::get_lane_offset(lane_id, warp_id, tile_offset);
// First update `mi` to the max per-row
{
accum_t max;
LambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) {
max = -cutlass::platform::numeric_limits<accum_t>::infinity();
},
[&](int accum_m, int accum_n, int idx) {
if (accum_n < max_col) {
max = cutlass::fast_max(max, frag[idx]);
}
},
[&](int accum_m) {
// Having 4x atomicMax seems faster than reduce within warp
// first...
atomicMaxFloat(&mi[accum_m], max);
});
}
// Make sure we all share the update values for `mi`
__syncthreads();
// Doing this `exp` is quite expensive. Let's
// split it across the warps
bool restore_mi_to_minus_inf = false;
if (lane_id < kLinesPerWarp) {
int id = warp_id * kLinesPerWarp + lane_id;
auto m_prime_id = m_prime[id];
auto mi_id = mi[id];
bool changed = m_prime_id < mi_id; // `false` if both are -inf
if (changed) {
auto m_prime_exp = exp2f(m_prime_id - mi_id);
out_rescale[id] = m_prime_exp;
s_prime[id] *= m_prime_exp;
} else {
// Only when bias is enabled, it's possible that all the first values
// of attention are masked to `-inf`. In that case we want to avoid
// `nan = exp2f(-inf - (-inf))` so we temporarily set `mi` to 0
if (kSupportsBias &&
mi_id == -cutlass::platform::numeric_limits<accum_t>::infinity()) {
restore_mi_to_minus_inf = true;
mi[id] = 0.0f;
}
out_rescale[id] = 1.0f;
}
}
__syncthreads(); // Update output fragments
if (kKeepOutputInRF && !is_first) {
accum_t line_rescale;
LambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) { line_rescale = out_rescale[accum_m]; },
[&](int accum_m, int accum_n, int idx) {
frag_o[idx] = frag_o[idx] * line_rescale;
},
[&](int accum_m) {});
}
// Update accum_m, accum_n, ...
{
accum_t mi_row, total_row;
LambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) { mi_row = mi[accum_m]; },
[&](int accum_m, int accum_n, int idx) {
frag[idx] =
(accum_n < max_col) ? exp2f(frag[idx] - mi_row) : accum_t(0.0);
},
[&](int accum_m) {});
LambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) { total_row = 0.0; },
[&](int accum_m, int accum_n, int idx) { total_row += frag[idx]; },
[&](int accum_m) {
if (LambdaIterator::reduceSameRow(
lane_id, total_row, [](accum_t a, accum_t b) {
return a + b;
})) {
// NOTE: we could atomically add `total_row` to `s_prime`, but
// it's faster (and deterministic) to avoid atomics here
addition_storage
[accum_m + kQueriesPerBlock * tile_offset.column()] =
total_row;
}
});
}
__syncthreads();
if (lane_id < kLinesPerWarp) {
int id = warp_id * kLinesPerWarp + lane_id;
accum_t total_row = s_prime[id];
if (restore_mi_to_minus_inf) {
// Restore `mi`, see above when we set `restore_mi_to_minus_inf=true`
mi[id] = -cutlass::platform::numeric_limits<accum_t>::infinity();
} else {
m_prime[id] = mi[id];
}
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < MM0::MmaCore::WarpCount::kN; ++i) {
total_row += addition_storage[id + kQueriesPerBlock * i];
}
s_prime[id] = total_row;
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/41_fused_multi_head_attention/fmha_grouped.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/fmha_grouped.h",
"repo_id": "examples",
"token_count": 17648
} | 6 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates calculating the address and predicates for loading tiles
from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses. The first tile
this iterator visits may be partial; the remaining tiles are complete, so the
predicates only need to be computed twice: once for the first (residual) tile
and once for the remaining full tiles, which all share the same predicates.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
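//
// A minimal usage sketch (hypothetical; the real call sites are the
// threadblock-level MMA pipelines, and the template arguments are elided):
//
//   using Iterator = cutlass::transform::threadblock::
//       PredicatedTileAccessIteratorResidualLast<Shape, Element, Layout,
//                                                AdvanceRank, ThreadMap, AccessType>;
//   typename Iterator::Params params(layout);
//   Iterator it(params, pointer, extent, thread_id, threadblock_offset);
//   // Full tiles use the steady-state predicates computed by the constructor.
//   // Before the iteration that covers the partial tile, swap in the residual mask:
//   it.set_residual_tile(true);
//   if (it.valid()) { frag = *it.get(); }   // guarded access
//   ++it;                                   // next access within the tile
//   it.add_tile_offset({0, 1});             // advance one whole tile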
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileAccessIteratorResidualLast
///
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
typename AccessType,
bool Gather = false>
class PredicatedTileAccessIteratorResidualLast;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for pitch-linear
/// data.
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_,
bool Gather>
class PredicatedTileAccessIteratorResidualLast<
Shape_,
Element_,
layout::PitchLinear,
AdvanceRank,
ThreadMap_,
AccessType_,
Gather> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;
using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates<
Shape,
Element,
Layout,
AdvanceRank,
ThreadMap,
AccessType>;
static int const kAccessesPerVector =
ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(
!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
using Mask = typename UnderlyingPredicates::Mask;
/// Uses a non-template class
struct Params : PredicatedTileAccessIteratorParams {
using Base = PredicatedTileAccessIteratorParams;
// Default ctor
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: Base(
layout.stride(0),
MakePredicatedTileAccessIteratorDesc<
Shape,
Element,
Layout,
kAdvanceRank,
ThreadMap>()()) {}
CUTLASS_HOST_DEVICE
Params(Base const& base) : Base(base) {}
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char*;
private:
//
// Data members
//
UnderlyingPredicates the_predicates;
Mask residual_tile_mask;
/// Parameters object with precomputed internal state
Params params_;
/// Internal pointer to first access of tile
BytePointer pointer_;
/// Below is used when Gather is turned on. We need to record strided_offset
/// and contiguous_offset separately to compute the offset as
///
/// offset = contiguous_offset + indices[strided_offset]
///
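/// i.e. the strided coordinate is first translated through the user-supplied
/// indices[] array to find the physical strided position, while the contiguous
/// coordinate addresses elements within it (see get() below).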
/// Gather indices
int const* indices_;
Index gather_offset_strided;
private:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent,
/// optionally, simplify predicate calculation during 'steady state' phase
bool is_steady_state = false) {
the_predicates.compute_predicates_(extent, is_steady_state);
}
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
/// Precomputed parameters object
Params const& params,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const& threadblock_offset,
/// Gather indices
int const* indices = nullptr)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(pointer))),
the_predicates(extent),
indices_(indices) {
the_predicates.set_predicates(thread_id, threadblock_offset);
the_predicates.get_mask(residual_tile_mask);
// Working around a weird compiler bug happening on P100 for the backward.
// I've seen together: the_predicates.predicates_[0] = 14 (instead of 15)
// residual_tile_mask[0] = 15 (correct)
//
// Adding prints when the value is calculated (in `compute_predicates_`)
// sometimes removes the bug. The consequence is that we skip some
// element of a tensor, leading to wrong results
// Setting `compute_predicates_`'s second argument (`is_steady_state`) to
// true also seems to get rid of the bug - at the cost of twice as many
// comparisons.
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700)
constexpr bool kWorkAroundCompilerBug = false;
#else
constexpr bool kWorkAroundCompilerBug = true;
#endif
the_predicates.compute_predicates_(extent, true && !kWorkAroundCompilerBug);
// update internal pointers
Layout layout(params_.stride_);
if (!Gather) {
add_pointer_offset(layout(the_predicates.thread_offset_));
} else {
gather_offset_strided = the_predicates.thread_offset_.strided();
add_pointer_offset(
layout(make_Coord(the_predicates.thread_offset_.contiguous(), 0)));
}
}
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
/// Precomputed parameters object
Params const& params,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id)
: PredicatedTileAccessIteratorResidualLast(
params,
pointer,
extent,
thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
the_predicates.set_iteration_index(index);
}
CUTLASS_HOST_DEVICE
void set_residual_tile(bool is_residual_tile) {
if (is_residual_tile) {
the_predicates.set_mask(residual_tile_mask);
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const& tile_offset) {
if (!Gather) {
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided());
pointer_ += Shape::kContiguous * tile_offset.contiguous();
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous());
pointer_ += Shape::kStrided * tile_offset.strided();
}
} else {
add_pointer_offset(Shape::kContiguous * tile_offset.contiguous());
gather_offset_strided += Shape::kStrided * tile_offset.strided();
}
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const {
if (Gather) {
assert(indices_);
if (!valid()) {
return nullptr;
}
LongIndex contiguous_offset = the_predicates.iteration_contiguous_ *
(ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value /
8) +
the_predicates.iteration_vector_;
int strided_index = gather_offset_strided +
the_predicates.iteration_strided_ * ThreadMap::Delta::kStrided;
LongIndex strided_offset = indices_[strided_index] *
LongIndex(params_.stride_) * sizeof_bits<Element>::value / 8;
return reinterpret_cast<AccessType*>(
pointer_ + contiguous_offset + strided_offset);
}
return reinterpret_cast<AccessType*>(
pointer_ +
the_predicates.iteration_contiguous_ *
(ThreadMap::Delta::kContiguous *
sizeof_bits<Element>::value) /
8) +
the_predicates.iteration_vector_;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++() {
the_predicates.operator++();
++the_predicates.iteration_vector_;
if (the_predicates.iteration_vector_ < kAccessesPerVector) {
return *this;
}
the_predicates.iteration_vector_ = 0;
++the_predicates.iteration_contiguous_;
if (the_predicates.iteration_contiguous_ <
ThreadMap::Iterations::kContiguous) {
return *this;
}
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
the_predicates.iteration_contiguous_ = 0;
++the_predicates.iteration_strided_;
if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) {
if (!Gather) {
pointer_ += params_.inc_strided_;
}
return *this;
}
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
the_predicates.iteration_strided_ = 0;
if (!Gather) {
// advance to next tile
pointer_ += params_.inc_next_;
// now return to start tile - if the iterator is subsequently advanced,
// this subtraction as well as the subsequent integer addition are both
// elided by the compiler.
pointer_ -= params_.inc_advance_;
}
return *this;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int) {
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
the_predicates.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
the_predicates.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) {
the_predicates.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) {
the_predicates.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() const {
return the_predicates.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for column-major
/// data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_,
bool Gather>
class PredicatedTileAccessIteratorResidualLast<
Shape_,
Element_,
layout::ColumnMajor,
AdvanceRank,
ThreadMap_,
AccessType_,
Gather> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;
using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
AccessType,
Gather>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorResidualLast;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: params_(layout::PitchLinear(layout.stride(0))){};
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const& base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
///< Precomputed parameters object
Params const& params,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const& threadblock_offset,
int const* indices =
nullptr ///< gather/scatter indices, note no support for
///< gather/scatter at this specialization
)
: iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.row(),
threadblock_offset.column()),
indices) {}
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(
params,
pointer,
extent,
thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iterator_.set_iteration_index(index);
}
CUTLASS_HOST_DEVICE
void set_residual_tile(bool enable) {
iterator_.set_residual_tile(enable);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const {
return reinterpret_cast<AccessType*>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int) {
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) {
iterator_.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for row-major
/// data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_,
bool Gather>
class PredicatedTileAccessIteratorResidualLast<
Shape_,
Element_,
layout::RowMajor,
AdvanceRank,
ThreadMap_,
AccessType_,
Gather> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;
using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
AccessType,
Gather>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorResidualLast;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: params_(layout::PitchLinear(layout.stride(0))){};
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const& base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
///< Precomputed parameters object
Params const& params,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const& threadblock_offset,
/// Gather indices
int const* indices = nullptr)
: iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.column(),
threadblock_offset.row()),
indices) {}
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(
params,
pointer,
extent,
thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iterator_.set_iteration_index(index);
}
CUTLASS_HOST_DEVICE
void set_residual_tile(bool enable) {
iterator_.set_residual_tile(enable);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const {
return reinterpret_cast<AccessType*>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int) {
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) {
iterator_.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank 2
/// data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_>
class PredicatedTileAccessIteratorResidualLast<
Shape_,
Element_,
layout::AffineRankN<2>,
AdvanceRank,
ThreadMap_,
AccessType_,
false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRankN<2>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;
using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates<
Shape,
Element,
layout::PitchLinear,
AdvanceRank,
ThreadMap,
AccessType>;
static int const kAccessesPerVector =
ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(
!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingPredicates::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend PredicatedTileAccessIteratorResidualLast;
private:
/// stride of pitch-linear layout (units of Element)
Coord<Layout::kStrideRank, Layout::LongIndex> stride_;
/// amount (in byte) to increment pointer to move to next access along
/// contiguous dimension
LongIndex inc_contiguous_;
/// amount (in byte) to increment pointer from first access of current
/// contiguous dimension to first access of next one.
LongIndex inc_strided_;
/// amount (in byte) to increment pointer from last access of current
/// contiguous dimension to first access of next one.
LongIndex inc_next_strided_;
/// amount (in byte) to increment pointer from last access to first access
/// of next tile
LongIndex inc_next_;
/// amount (in byte) to increment pointer from first access of current tile
/// to first access of next tile
LongIndex inc_advance_;
public:
// Default ctor
CUTLASS_HOST_DEVICE
Params()
: stride_(0),
inc_contiguous_(0),
inc_strided_(0),
inc_next_(0),
inc_advance_(0) {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: stride_({layout.stride(0), layout.stride(1)}) {
inc_contiguous_ =
(LongIndex(stride_[0]) * ThreadMap::Delta::kContiguous) *
sizeof_bits<Element>::value / 8;
inc_strided_ = (LongIndex(stride_[1]) * ThreadMap::Delta::kStrided) *
sizeof_bits<Element>::value / 8;
inc_next_strided_ = inc_strided_ -
LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_;
if (kAdvanceRank) {
// advance along strided dimension
inc_advance_ = Shape::kStrided * LongIndex(stride_[1]) *
sizeof_bits<Element>::value / 8;
} else {
// advance along contiguous dimension
inc_advance_ =
Shape::kContiguous * stride_[0] * sizeof_bits<Element>::value / 8;
}
inc_next_ = inc_advance_ -
LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_ -
LongIndex(ThreadMap::Iterations::kStrided - 1) * inc_strided_;
};
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char*;
//
// Data members
//
/// Parameters object with precomputed internal state
Params params_;
/// Internal pointer to first access of tile
BytePointer pointer_;
UnderlyingPredicates the_predicates;
Mask residual_tile_mask;
private:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent,
/// optionally, simplify predicate calculation during 'steady state' phase
bool is_steady_state = false) {
the_predicates.compute_predicates_(extent, is_steady_state);
}
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
///< Precomputed parameters object
Params const& params,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const& threadblock_offset,
int const* indices =
nullptr ///< gather/scatter indices, note no support for
///< gather/scatter at this specialization
)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(pointer))),
the_predicates(extent) {
the_predicates.set_predicates(thread_id, threadblock_offset);
// update internal pointers
Layout layout(params_.stride_);
add_pointer_offset(layout(the_predicates.thread_offset_));
}
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(
params,
pointer,
extent,
thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
the_predicates.set_iteration_index(index);
}
CUTLASS_HOST_DEVICE
void set_residual_tile(bool is_residual_tile) {
if (is_residual_tile) {
the_predicates.set_mask(residual_tile_mask);
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset) {
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset[1]);
pointer_ += Shape::kContiguous * tile_offset[0];
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset[0]);
pointer_ += Shape::kStrided * tile_offset[1];
}
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const {
return reinterpret_cast<AccessType*>(pointer_) +
the_predicates.iteration_vector_;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++() {
the_predicates.operator++();
++the_predicates.iteration_vector_;
if (the_predicates.iteration_vector_ < kAccessesPerVector) {
return *this;
}
the_predicates.iteration_vector_ = 0;
++the_predicates.iteration_contiguous_;
if (the_predicates.iteration_contiguous_ <
ThreadMap::Iterations::kContiguous) {
pointer_ += params_.inc_contiguous_;
return *this;
}
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
the_predicates.iteration_contiguous_ = 0;
++the_predicates.iteration_strided_;
if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) {
pointer_ += params_.inc_next_strided_;
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided),
    // which means we enter the next tile.
the_predicates.iteration_strided_ = 0;
// advance to next tile
pointer_ += params_.inc_next_;
// now return to start tile - if the iterator is subsequently advanced, this
// subtraction as well as the subsequent integer addition are both elided by
// the compiler.
pointer_ -= params_.inc_advance_;
return *this;
}
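  // Traversal order realized by operator++ above: vector accesses within an
  // access group, then contiguous iterations, then strided iterations, and
  // finally an advance to the next tile along the advance rank.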
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int) {
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
the_predicates.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
the_predicates.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) {
the_predicates.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) {
the_predicates.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return the_predicates.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank 2
/// column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_>
class PredicatedTileAccessIteratorResidualLast<
Shape_,
Element_,
layout::AffineRank2ColumnMajor,
AdvanceRank,
ThreadMap_,
AccessType_,
false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRank2ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;
// Map to the underlying AffineRankN<2> layout
using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::AffineRankN<2>,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorResidualLast;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given an AffineRankN<2> tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: params_(layout::AffineRankN<2>(layout.stride(0), layout.stride(1))){};
};
private:
//
// Data members
//
/// Underlying AffineRankN<2> tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
///< Precomputed parameters object
Params const& params,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const& threadblock_offset,
int const* indices =
nullptr ///< gather/scatter indices, note no support for
///< gather/scatter at this specialization
)
: iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.row(),
threadblock_offset.column())) {}
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(
params,
pointer,
extent,
thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iterator_.set_iteration_index(index);
}
CUTLASS_HOST_DEVICE
void set_residual_tile(bool enable) {
iterator_.set_residual_tile(enable);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset) {
iterator_.add_tile_offset(
make_Coord(tile_offset.row(), tile_offset.column()));
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const {
return reinterpret_cast<AccessType*>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int) {
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) {
iterator_.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank-2
/// row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_>
class PredicatedTileAccessIteratorResidualLast<
Shape_,
Element_,
layout::AffineRank2RowMajor,
AdvanceRank,
ThreadMap_,
AccessType_,
false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRank2RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;
// Map to the underlying AffineRankN<2> layout
using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::AffineRankN<2>,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorResidualLast;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given an AffineRankN<2> tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: params_(layout::AffineRankN<2>(layout.stride(1), layout.stride(0))){};
};
private:
//
// Data members
//
/// Underlying AffineRankN<2> tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
///< Precomputed parameters object
Params const& params,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const& threadblock_offset,
int const* indices =
nullptr ///< gather/scatter indices, note no support for
///< gather/scatter at this specialization
)
: iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(
params,
pointer,
extent,
thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iterator_.set_iteration_index(index);
}
CUTLASS_HOST_DEVICE
void set_residual_tile(bool enable) {
iterator_.set_residual_tile(enable);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset) {
iterator_.add_tile_offset(
make_Coord(tile_offset.column(), tile_offset.row()));
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const {
return reinterpret_cast<AccessType*>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int) {
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) {
iterator_.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for column-major
/// interleaved data. It is mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_,
int InterleavedK>
class PredicatedTileAccessIteratorResidualLast<
Shape_,
Element_,
layout::ColumnMajorInterleaved<InterleavedK>,
AdvanceRank,
ThreadMap_,
AccessType_,
false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::ColumnMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;
using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
layout::PitchLinearShape<
Shape::kRow * kInterleavedK,
Shape::kColumn / kInterleavedK>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorResidualLast;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const& base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
/// Precomputed parameters object
Params const& params,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const& threadblock_offset,
int const* indices =
nullptr ///< gather/scatter indices, note no support for
///< gather/scatter at this specialization
)
: iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(
extent.row() * kInterleavedK,
extent.column() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.row() * kInterleavedK,
threadblock_offset.column() / kInterleavedK)) {}
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(
params,
pointer,
extent,
thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iterator_.set_iteration_index(index);
}
CUTLASS_HOST_DEVICE
void set_residual_tile(bool enable) {
iterator_.set_residual_tile(enable);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const {
return reinterpret_cast<AccessType*>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int) {
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) {
iterator_.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for row-major
/// interleaved data. It is mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_,
int InterleavedK>
class PredicatedTileAccessIteratorResidualLast<
Shape_,
Element_,
layout::RowMajorInterleaved<InterleavedK>,
AdvanceRank,
ThreadMap_,
AccessType_,
false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::RowMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;
using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
layout::PitchLinearShape<
Shape::kColumn * kInterleavedK,
Shape::kRow / kInterleavedK>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorResidualLast;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const& base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
/// Precomputed parameters object
Params const& params,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const& threadblock_offset,
int const* indices =
nullptr ///< gather/scatter indices, note no support for
///< gather/scatter at this specialization
)
: iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(
extent.column() * kInterleavedK,
extent.row() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.column() * kInterleavedK,
threadblock_offset.row() / kInterleavedK)) {}
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(
params,
pointer,
extent,
thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iterator_.set_iteration_index(index);
}
CUTLASS_HOST_DEVICE
void set_residual_tile(bool enable) {
iterator_.set_residual_tile(enable);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const {
return reinterpret_cast<AccessType*>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int) {
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) {
iterator_.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| examples/41_fused_multi_head_attention/iterators/predicated_tile_access_iterator_residual_last.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/iterators/predicated_tile_access_iterator_residual_last.h",
"repo_id": "examples",
"token_count": 22473
} | 7 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator without split-K
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename OutputOp_ ///< Output operator
>
class FusedBiasActEpilogue {
public:
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using OutputOp = OutputOp_;
/// Output layout is always row-major
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
public:
static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
"Divisibility");
public:
/// Constructor
CUTLASS_DEVICE
  FusedBiasActEpilogue() { }
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
AccumulatorTile &accumulators, ///< Complete warp-level accumulator tile
    AccumulatorTile & fused_bias_act_accumlators, ///< Accumulator tile receiving the fused bias/activation result
    OutputTileIterator source_iterator) { ///< Tile iterator reading the source (bias) tensor
bool need_bias = output_op.is_source_needed();
if (need_bias)
compute_source_needed_(output_op, accumulators, fused_bias_act_accumlators, source_iterator);
else
compute_source_no_needed_(output_op, accumulators, fused_bias_act_accumlators);
}
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
AccumulatorTile &accumulators, ///< Complete warp-level accumulator tile
    AccumulatorTile & fused_bias_act_accumlators) { ///< Accumulator tile receiving the fused bias/activation result
compute_source_no_needed_(output_op, accumulators, fused_bias_act_accumlators);
}
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp const &output_op, ///< Output operator
AccumulatorTile &accumulators, ///< Complete warp-level accumulator tile
    AccumulatorTile & fused_bias_act_accumlators, ///< Accumulator tile receiving the fused bias/activation result
    OutputTileIterator source_iterator) { ///< Tile iterator reading the source (bias) tensor
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
AccumulatorFragmentIterator fused_bias_act_fragment_iterator(fused_bias_act_accumlators);
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
source_iterator.load(source_fragment);
++source_iterator;
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
typename AccumulatorFragmentIterator::Fragment fused_bias_act_fragment;
fused_bias_act_fragment = output_op(accum_fragment, source_fragment);
fused_bias_act_fragment_iterator.store(fused_bias_act_fragment);
++fused_bias_act_fragment_iterator;
}
}
CUTLASS_DEVICE
void compute_source_no_needed_(
OutputOp const &output_op, ///< Output operator
AccumulatorTile &accumulators, ///< Complete warp-level accumulator tile
    AccumulatorTile & fused_bias_act_accumlators) { ///< Accumulator tile receiving the fused bias/activation result
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
AccumulatorFragmentIterator fused_bias_act_fragment_iterator(fused_bias_act_accumlators);
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < AccumulatorFragmentIterator::kIterations; ++iter) {
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
typename AccumulatorFragmentIterator::Fragment fused_bias_act_fragment;
fused_bias_act_fragment = output_op(accum_fragment);
fused_bias_act_fragment_iterator.store(fused_bias_act_fragment);
++fused_bias_act_fragment_iterator;
}
}
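  // Both compute_* methods above apply the element-wise bias/activation
  // entirely in registers and write the result into fused_bias_act_accumlators;
  // they differ only in whether a source fragment is first streamed in from
  // global memory through the OutputTileIterator.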
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/threadblock/fused_bias_act_epilogue.h/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/threadblock/fused_bias_act_epilogue.h",
"repo_id": "examples",
"token_count": 2743
} | 8 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import os
class replace_fix_impl:
def __init__(self, src_dir, dst_dir, cutlass_deps_root):
self.src_dir = src_dir
self.dst_dir = dst_dir
self.cutlass_deps_root = cutlass_deps_root
def gen_code(self):
for sub_dir in os.walk(self.src_dir):
files_in_sub_dir = sub_dir[2]
src_dirs = sub_dir[0]
output_dirs = self.dst_dir + sub_dir[0][len(self.src_dir):]
if not os.path.exists(output_dirs):
os.mkdir(output_dirs)
for f in files_in_sub_dir:
with open(src_dirs +"/" + f, 'r') as current_file:
output_lines = []
lines = current_file.readlines()
for line in lines:
if(len(line) >= len("#include \"cutlass") and line[:len("#include \"cutlass")] == "#include \"cutlass"):
new_line = "#include \"" + self.cutlass_deps_root + line[len("#include \""):]
# print(new_line)
output_lines.append(new_line)
else:
output_lines.append(line)
with open(output_dirs + "/" + f, "w+") as dest_file:
dest_file.writelines(output_lines)
| examples/44_multi_gemm_ir_and_codegen/ir_gen/replace_fix_impl_header.py/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/ir_gen/replace_fix_impl_header.py",
"repo_id": "examples",
"token_count": 1202
} | 9 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing elementwise operations used by epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/epilogue/collective/detail.hpp"
#include "cute/tensor.hpp"
#include "cute/numeric/numeric_types.hpp"
#include "gather_tensor.hpp"
namespace cutlass::epilogue::collective {
/// Applies an element wise operation to all elements within the fragment
/// and scatter-writes them out to destination storage.
/// GatherC and ScatterD are types of user-defined functions that apply the
/// transformation of the strided coordinate (e.g. through an index array).
/// A hypothetical sketch of such a functor follows the class definition below.
template <
class StrideC_,
class StrideD_,
class ThreadEpilogueOp_,
class EpilogueSchedule_,
class GatherC_,
class ScatterD_
>
class EpilogueGatherScatter {
public:
//
// Type Aliases
//
using EpilogueSchedule = EpilogueSchedule_;
// derived types of output thread level operator
using ThreadEpilogueOp = ThreadEpilogueOp_;
using ElementOutput = typename ThreadEpilogueOp::ElementOutput;
using ElementAccumulator = typename ThreadEpilogueOp::ElementAccumulator;
using ElementCompute = typename ThreadEpilogueOp::ElementCompute;
using ElementScalar = ElementCompute;
using ElementC = typename ThreadEpilogueOp::ElementC;
using StrideC = StrideC_;
using ElementD = typename ThreadEpilogueOp::ElementD;
using StrideD = StrideD_;
// Every epilogue needs these two GmemTiledCopy{C,D} aliases.
// If you don't know what they should be, just use void.
using GmemTiledCopyC = void;
using GmemTiledCopyD = void;
using GatherC = GatherC_;
using ScatterD = ScatterD_;
static const int kOutputAlignment = ThreadEpilogueOp::kCount;
using AlignmentType = typename cute::uint_bit<sizeof_bits<ElementOutput>::value * kOutputAlignment>::type;
  static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]");
  static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]");
struct SharedStorage { };
// Host side epilogue arguments
struct Arguments {
typename ThreadEpilogueOp::Params thread_params{};
ElementC const* ptr_C = nullptr;
StrideC dC{};
ElementD* ptr_D = nullptr;
StrideD dD{};
GatherC gather_C{};
ScatterD scatter_D{};
};
// Device side epilogue params
using Params = Arguments;
//
// Methods
//
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(
[[maybe_unused]] ProblemShape const& _,
Arguments const& args,
[[maybe_unused]] void* workspace) {
return args;
}
template<class ProblemShape>
static bool
can_implement(
[[maybe_unused]] ProblemShape const& problem_shape,
[[maybe_unused]] Arguments const& args) {
return true;
}
CUTLASS_HOST_DEVICE
EpilogueGatherScatter(Params const& params_) : params(params_) { }
template<
class ProblemShapeMNKL,
class BlockShapeMNK,
class BlockCoordMNKL,
class FrgEngine, class FrgLayout,
class TiledMma,
class ResidueMNK
>
CUTLASS_DEVICE void
operator()(
ProblemShapeMNKL problem_shape_mnkl,
BlockShapeMNK blk_shape_MNK,
BlockCoordMNKL blk_coord_mnkl,
cute::Tensor<FrgEngine, FrgLayout> const& accumulators,
TiledMma tiled_mma,
ResidueMNK residue_mnk,
int thread_idx,
char* smem_buf)
{
using namespace cute;
using X = Underscore;
static_assert(cute::rank(ProblemShapeMNKL{}) == 4, "ProblemShapeMNKL must be rank 4");
static_assert(is_static<BlockShapeMNK>::value, "ThreadBlock tile shape must be static");
static_assert(cute::rank(BlockShapeMNK{}) == 3, "BlockShapeMNK must be rank 3");
    static_assert(cute::rank(BlockCoordMNKL{}) == 4, "BlockCoordMNKL must be rank 4");
(void) smem_buf;
ThreadEpilogueOp epilogue_op{params.thread_params};
// Separate out problem shape for convenience
auto M = get<0>(problem_shape_mnkl);
auto N = get<1>(problem_shape_mnkl);
auto L = get<3>(problem_shape_mnkl);
auto stride_c = detail::get_epilogue_stride<EpilogueSchedule>(params.dC);
auto stride_d = detail::get_epilogue_stride<EpilogueSchedule>(params.dD);
// Represent the full output tensor
Tensor mC_mnl = make_gather_tensor(make_gmem_ptr(params.ptr_C), make_shape(M,N,L), stride_c, params.gather_C); // (m,n,l)
Tensor mD_mnl = make_gather_tensor(make_gmem_ptr(params.ptr_D), make_shape(M,N,L), stride_d, params.scatter_D); // (m,n,l)
Tensor gC_mnl = local_tile(mC_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l)
Tensor gD_mnl = local_tile(mD_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l)
// Slice to get the tile this CTA is responsible for
auto [m_coord, n_coord, k_coord, l_coord] = blk_coord_mnkl;
Tensor gC = gC_mnl(_,_,m_coord,n_coord,l_coord); // (BLK_M,BLK_N)
Tensor gD = gD_mnl(_,_,m_coord,n_coord,l_coord); // (BLK_M,BLK_N)
// Partition source and destination tiles to match the accumulator partitioning
auto thr_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCgD = thr_mma.partition_C(gD); // (VEC,THR_M,THR_N)
Tensor tCgC = thr_mma.partition_C(gC); // (VEC,THR_M,THR_N)
static_assert(is_static<FrgLayout>::value, "Accumulator layout must be static");
CUTE_STATIC_ASSERT_V(size(tCgC) == size(tCgD),
"Source and destination must have the same number of elements.");
CUTE_STATIC_ASSERT_V(size(tCgD) == size(accumulators),
"Accumulator count must have the same destination element count.");
// Make an identity coordinate tensor for predicating our output MN tile
auto cD = make_identity_tensor(make_shape(unwrap(shape<0>(gD)), unwrap(shape<1>(gD))));
Tensor tCcD = thr_mma.partition_C(cD);
// source is needed
if (epilogue_op.is_source_needed()) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size(accumulators); ++i) {
if (elem_less(tCcD(i), make_coord(get<0>(residue_mnk), get<1>(residue_mnk)))) {
tCgD(i) = epilogue_op(accumulators(i), tCgC(i));
}
}
}
// source is not needed, avoid load
else {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size(accumulators); ++i) {
if (elem_less(tCcD(i), make_coord(get<0>(residue_mnk), get<1>(residue_mnk)))) {
tCgD(i) = epilogue_op(accumulators(i));
}
}
}
}
private:
Params params;
};
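// The following is an illustrative sketch only and is not part of the original
// example: it shows the minimal shape of a coordinate-remapping functor of the
// kind GatherC / ScatterD are expected to model. The functors actually used by
// this example are defined in gather_tensor.hpp; the name and members below
// are hypothetical and exist purely to clarify the assumed interface.
struct ExampleIndexedRemap {
  int const* indices = nullptr;   // assumed device-visible index array (e.g. a row permutation)

  // Remap a logical coordinate through the index array; identity when absent.
  CUTLASS_HOST_DEVICE
  int operator()(int i) const {
    return indices ? indices[i] : i;
  }
};
// A gather tensor built over such a functor reads (or scatter-writes) element
// indices[i] of the underlying storage whenever logical row i is accessed.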
} // namespace cutlass::epilogue::collective
| examples/52_hopper_gather_scatter_fusion/scatter_epilogue.hpp/0 | {
"file_path": "examples/52_hopper_gather_scatter_fusion/scatter_epilogue.hpp",
"repo_id": "examples",
"token_count": 3287
} | 10 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include "cutlass/util/print_error.hpp"
#include "cutlass/util/GPU_Clock.hpp"
#include "cutlass/util/helper_cuda.hpp"
template <class ProblemShape, class CtaTiler,
class TA, class AStride, class ASmemLayout, class TiledCopyA,
class TB, class BStride, class BSmemLayout, class TiledCopyB,
class TC, class CStride, class CSmemLayout, class TiledMma,
class Alpha, class Beta>
__global__ static
__launch_bounds__(decltype(size(TiledMma{}))::value)
void
gemm_device(ProblemShape shape_MNK, CtaTiler cta_tiler,
TA const* A, AStride dA, ASmemLayout sA_layout, TiledCopyA copy_a,
TB const* B, BStride dB, BSmemLayout sB_layout, TiledCopyB copy_b,
TC * C, CStride dC, CSmemLayout , TiledMma mma,
Alpha alpha, Beta beta)
{
using namespace cute;
// Preconditions
CUTE_STATIC_ASSERT_V(rank(shape_MNK) == Int<3>{}); // (M, N, K)
CUTE_STATIC_ASSERT_V(rank(cta_tiler) == Int<3>{}); // (BLK_M, BLK_N, BLK_K)
CUTE_STATIC_ASSERT_V(size(copy_a) == size(mma)); // NumThreads
CUTE_STATIC_ASSERT_V(size(copy_b) == size(mma)); // NumThreads
static_assert(is_static<ASmemLayout>::value);
static_assert(is_static<BSmemLayout>::value);
static_assert(is_static<CSmemLayout>::value);
CUTE_STATIC_ASSERT_V(size<0>(ASmemLayout{}) == size<0>(cta_tiler)); // BLK_M
CUTE_STATIC_ASSERT_V(size<0>(CSmemLayout{}) == size<0>(cta_tiler)); // BLK_M
CUTE_STATIC_ASSERT_V(size<0>(BSmemLayout{}) == size<1>(cta_tiler)); // BLK_N
CUTE_STATIC_ASSERT_V(size<1>(CSmemLayout{}) == size<1>(cta_tiler)); // BLK_N
CUTE_STATIC_ASSERT_V(size<1>(ASmemLayout{}) == size<2>(cta_tiler)); // BLK_K
CUTE_STATIC_ASSERT_V(size<1>(BSmemLayout{}) == size<2>(cta_tiler)); // BLK_K
CUTE_STATIC_ASSERT_V(congruent(select<0,2>(shape_MNK), dA)); // dA strides for shape MK
CUTE_STATIC_ASSERT_V(congruent(select<1,2>(shape_MNK), dB)); // dB strides for shape NK
CUTE_STATIC_ASSERT_V(congruent(select<0,1>(shape_MNK), dC)); // dC strides for shape MN
//
// Full and Tiled Tensors
//
// Represent the full tensors
Tensor mA = make_tensor(make_gmem_ptr(A), select<0,2>(shape_MNK), dA); // (M,K)
Tensor mB = make_tensor(make_gmem_ptr(B), select<1,2>(shape_MNK), dB); // (N,K)
Tensor mC = make_tensor(make_gmem_ptr(C), select<0,1>(shape_MNK), dC); // (M,N)
// Get the appropriate blocks for this thread block
auto cta_coord = make_coord(blockIdx.x, blockIdx.y, _); // (m,n,k)
Tensor gA = local_tile(mA, cta_tiler, cta_coord, Step<_1, X,_1>{}); // (BLK_M,BLK_K,k)
Tensor gB = local_tile(mB, cta_tiler, cta_coord, Step< X,_1,_1>{}); // (BLK_N,BLK_K,k)
Tensor gC = local_tile(mC, cta_tiler, cta_coord, Step<_1,_1, X>{}); // (BLK_M,BLK_N)
// Shared memory buffers
__shared__ TA smemA[cosize_v<ASmemLayout>];
__shared__ TB smemB[cosize_v<BSmemLayout>];
Tensor sA = make_tensor(make_smem_ptr(smemA), sA_layout); // (BLK_M,BLK_K,PIPE)
Tensor sB = make_tensor(make_smem_ptr(smemB), sB_layout); // (BLK_N,BLK_K,PIPE)
//
// Partition the copying of A and B tiles across the threads
//
ThrCopy thr_copy_a = copy_a.get_slice(threadIdx.x);
Tensor tAgA = thr_copy_a.partition_S(gA); // (CPY,CPY_M,CPY_K,k)
Tensor tAsA = thr_copy_a.partition_D(sA); // (CPY,CPY_M,CPY_K,PIPE)
ThrCopy thr_copy_b = copy_b.get_slice(threadIdx.x);
Tensor tBgB = thr_copy_b.partition_S(gB); // (CPY,CPY_N,CPY_K,k)
Tensor tBsB = thr_copy_b.partition_D(sB); // (CPY,CPY_N,CPY_K,PIPE)
CUTE_STATIC_ASSERT_V(size<1>(tAgA) == size<1>(tAsA)); // CPY_M
CUTE_STATIC_ASSERT_V(size<2>(tAgA) == size<2>(tAsA)); // CPY_K
CUTE_STATIC_ASSERT_V(size<1>(tBgB) == size<1>(tBsB)); // CPY_N
CUTE_STATIC_ASSERT_V(size<2>(tBgB) == size<2>(tBsB)); // CPY_K
//
// PREFETCH
//
auto K_PIPE_MAX = size<3>(tAsA);
// Total count of tiles
int k_tile_count = size<3>(tAgA);
// Current tile index in gmem to read from
int k_tile_next = 0;
// Start async loads for all pipes but the last
CUTE_UNROLL
for (int k_pipe = 0; k_pipe < K_PIPE_MAX-1; ++k_pipe) {
copy(copy_a, tAgA(_,_,_,k_tile_next), tAsA(_,_,_,k_pipe));
copy(copy_b, tBgB(_,_,_,k_tile_next), tBsB(_,_,_,k_pipe));
cp_async_fence();
--k_tile_count;
if (k_tile_count > 0) { ++k_tile_next; }
}
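  // At this point K_PIPE_MAX-1 gmem->smem copy stages have been committed
  // (one cp_async_fence per stage) and k_tile_next refers to the next unread
  // k-tile, if any remain.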
//
// Define A/B partitioning and C accumulators
//
ThrMMA thr_mma = mma.get_slice(threadIdx.x);
Tensor tCsA = thr_mma.partition_A(sA); // (MMA,MMA_M,MMA_K,PIPE)
Tensor tCsB = thr_mma.partition_B(sB); // (MMA,MMA_N,MMA_K,PIPE)
Tensor tCgC = thr_mma.partition_C(gC); // (MMA,MMA_M,MMA_N)
// Allocate registers for pipelining
Tensor tCrA = thr_mma.make_fragment_A(tCsA(_,_,_,0)); // (MMA,MMA_M,MMA_K)
Tensor tCrB = thr_mma.make_fragment_B(tCsB(_,_,_,0)); // (MMA,MMA_N,MMA_K)
// Allocate the accumulators -- same size as the projected data
Tensor tCrC = thr_mma.make_fragment_C(tCgC); // (MMA,MMA_M,MMA_N)
CUTE_STATIC_ASSERT_V(( shape(tCrA) == take<0,3>(shape(tCsA)))); // (MMA,MMA_M,MMA_K)
CUTE_STATIC_ASSERT_V(( shape(tCrB) == take<0,3>(shape(tCsB)))); // (MMA,MMA_N,MMA_K)
CUTE_STATIC_ASSERT_V(( shape(tCrC) == take<0,3>(shape(tCgC)))); // (MMA,MMA_M,MMA_N)
CUTE_STATIC_ASSERT_V((size<1>(tCgC) == size<1>(tCsA))); // MMA_M
CUTE_STATIC_ASSERT_V((size<2>(tCgC) == size<1>(tCsB))); // MMA_N
CUTE_STATIC_ASSERT_V((size<2>(tCsA) == size<2>(tCsB))); // MMA_K
// Clear the accumulators
clear(tCrC);
#if 0
if(thread0()) {
print(" mA : "); print( mA); print("\n");
print(" gA : "); print( gA); print("\n");
print(" sA : "); print( sA); print("\n");
print("tAgA : "); print(tAgA); print("\n");
print("tAsA : "); print(tAsA); print("\n");
}
#endif
#if 0
if(thread0()) {
print(" mB : "); print( mB); print("\n");
print(" gB : "); print( gB); print("\n");
print(" sB : "); print( sB); print("\n");
print("tBgB : "); print(tBgB); print("\n");
print("tBsB : "); print(tBsB); print("\n");
}
#endif
#if 0
if(thread0()) {
print(" mC : "); print( mC); print("\n");
print(" gC : "); print( gC); print("\n");
print("tCsA : "); print(tCsA); print("\n");
print("tCsB : "); print(tCsB); print("\n");
print("tCgC : "); print(tCgC); print("\n");
print("tCrA : "); print(tCrA); print("\n");
print("tCrB : "); print(tCrB); print("\n");
print("tCrC : "); print(tCrC); print("\n");
}
#endif
#if 1
// Current pipe index in smem to read from
int smem_pipe_read = 0;
// Current pipe index in smem to write to
int smem_pipe_write = K_PIPE_MAX-1;
// Pipe slice
Tensor tCsA_p = tCsA(_,_,_,smem_pipe_read);
Tensor tCsB_p = tCsB(_,_,_,smem_pipe_read);
// Size of the register pipeline
auto K_BLOCK_MAX = size<2>(tCrA);
// PREFETCH register pipeline
if (K_BLOCK_MAX > 1) {
// Wait until our first prefetched tile is loaded in
cp_async_wait<K_PIPE_MAX-2>();
__syncthreads();
// Prefetch the first rmem from the first k-tile
copy(tCsA_p(_,_,Int<0>{}), tCrA(_,_,Int<0>{}));
copy(tCsB_p(_,_,Int<0>{}), tCrB(_,_,Int<0>{}));
}
//
// PIPELINED MAIN LOOP
// TUTORIAL: Example of a gemm loop that pipelines shared memory using SM80's cp.async instructions
// and explicit pipelines in shared memory.
// Data is read from global(k_tile_next) to shared(smem_pipe_write).
// Data is read from shared(smem_pipe_read) to registers(k_block_next).
  //   Data is computed on registers(k_block).
//
// This allows all copies and compute to overlap:
// Copy from gmem->smem can overlap with copies from smem->rmem and compute on rmem.
// Copy from smem->rmem can overlap with compute on rmem.
//
CUTE_NO_UNROLL
while (k_tile_count > -(K_PIPE_MAX-1))
{
CUTE_UNROLL
for (int k_block = 0; k_block < K_BLOCK_MAX; ++k_block)
{
if (k_block == K_BLOCK_MAX - 1)
{
// Slice the smem_pipe_read smem
tCsA_p = tCsA(_,_,_,smem_pipe_read);
tCsB_p = tCsB(_,_,_,smem_pipe_read);
// Commit the smem for smem_pipe_read
cp_async_wait<K_PIPE_MAX-2>();
__syncthreads();
}
// Load A, B shmem->regs for k_block+1
auto k_block_next = (k_block + Int<1>{}) % K_BLOCK_MAX; // static
copy(tCsA_p(_,_,k_block_next), tCrA(_,_,k_block_next));
copy(tCsB_p(_,_,k_block_next), tCrB(_,_,k_block_next));
// Copy gmem to smem before computing gemm on each k-pipe
if (k_block == 0)
{
copy(copy_a, tAgA(_,_,_,k_tile_next), tAsA(_,_,_,smem_pipe_write));
copy(copy_b, tBgB(_,_,_,k_tile_next), tBsB(_,_,_,smem_pipe_write));
cp_async_fence();
// Advance the gmem tile
--k_tile_count;
if (k_tile_count > 0) { ++k_tile_next; }
// Advance the smem pipe
smem_pipe_write = smem_pipe_read;
++smem_pipe_read;
smem_pipe_read = (smem_pipe_read == K_PIPE_MAX) ? 0 : smem_pipe_read;
}
// Thread-level register gemm for k_block
gemm(mma, tCrA(_,_,k_block), tCrB(_,_,k_block), tCrC);
}
}
#endif
//
// Epilogue
//
axpby(alpha, tCrC, beta, tCgC);
}
// Setup params for a NT GEMM
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm_nt(int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
using namespace cute;
// Define shapes (dynamic)
auto M = int(m);
auto N = int(n);
auto K = int(k);
auto prob_shape = make_shape(M, N, K); // (M, N, K)
// Define NT strides (mixed)
auto dA = make_stride(Int<1>{}, ldA); // (dM, dK)
auto dB = make_stride(Int<1>{}, ldB); // (dN, dK)
auto dC = make_stride(Int<1>{}, ldC); // (dM, dN)
// Define CTA tile sizes (static)
auto bM = Int<128>{};
auto bN = Int<128>{};
auto bK = Int< 8>{};
auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K)
auto bP = Int<3>{}; // Pipeline
// Define the smem layouts (static)
auto sA = make_layout(make_shape(bM, bK, bP)); // (m,k,p) -> smem_idx; m-major
auto sB = make_layout(make_shape(bN, bK, bP)); // (n,k,p) -> smem_idx; n-major
auto sC = make_layout(make_shape(bM, bN)); // (m,n) -> smem_idx; m-major
// Define the thread layouts (static)
TiledCopy copyA = make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<uint128_t>, TA>{},
Layout<Shape<_32,_8>>{}, // Thr layout 32x8 m-major
Layout<Shape< _4,_1>>{});// Val layout 4x1 m-major
TiledCopy copyB = make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<uint128_t>, TB>{},
Layout<Shape<_32,_8>>{}, // Thr layout 32x8 n-major
Layout<Shape< _4,_1>>{});// Val layout 4x1 n-major
TiledMMA mmaC = make_tiled_mma(UniversalFMA<TC,TA,TB>{},
Layout<Shape<_16,_16,_1>>{}); // 16x16x1 TiledMMA
#if 0
print(copyA);
print(copyB);
print(mmaC);
#endif
#if 0
print_latex(copyA);
print_latex(copyB);
print_latex(mmaC);
#endif
dim3 dimBlock(size(mmaC));
dim3 dimGrid(size(ceil_div(M, bM)),
size(ceil_div(N, bN)));
gemm_device<<<dimGrid, dimBlock, 0, stream>>>
(prob_shape, cta_tiler,
A, dA, sA, copyA,
B, dB, sB, copyB,
C, dC, sC, mmaC,
alpha, beta);
}
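// Illustrative usage sketch (not part of the original tutorial). gemm_nt above
// expects A stored m-major (column-major M x K, leading dimension ldA >= M),
// B stored n-major (column-major N x K, ldB >= N), and C column-major
// (ldC >= M); it then computes C = alpha * A * B^T + beta * C.
// The hypothetical helper below only shows how the leading dimensions would
// typically be chosen for densely packed operands.
template <class TA, class TB, class TC>
void
gemm_nt_packed(int m, int n, int k,
               TA const* A, TB const* B, TC* C,
               cudaStream_t stream = 0)
{
  gemm_nt(m, n, k,
          TC(1),
          A, /*ldA=*/m,
          B, /*ldB=*/n,
          TC(0),
          C, /*ldC=*/m,
          stream);
}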
// Setup params for a TN GEMM
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm_tn(int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
using namespace cute;
// Define shapes (dynamic)
auto M = int(m);
auto N = int(n);
auto K = int(k);
auto prob_shape = make_shape(M, N, K); // (M, N, K)
// Define TN strides (mixed)
auto dA = make_stride(ldA, Int<1>{}); // (dM, dK)
auto dB = make_stride(ldB, Int<1>{}); // (dN, dK)
auto dC = make_stride(Int<1>{}, ldC); // (dM, dN)
// Define CTA tile sizes (static)
auto bM = Int<128>{};
auto bN = Int<128>{};
auto bK = Int< 8>{};
auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K)
auto bP = Int<3>{}; // Pipeline
// Define the smem layouts (static)
auto sA_atom = make_layout(make_shape ( bM, bK),
make_stride(Int<1>{}, bM+Int<1>{})); // (m,k) -> smem_idx; padded m-major
  auto sB_atom = make_layout(make_shape ( bN,          bK),
                             make_stride(Int<1>{}, bN+Int<1>{})); // (n,k) -> smem_idx; padded n-major
  // The +1 padding on the k-stride staggers k-slices across smem banks to reduce bank conflicts
  auto sA = tile_to_shape(sA_atom, make_shape(bM, bK, bP));
  auto sB = tile_to_shape(sB_atom, make_shape(bN, bK, bP));
auto sC = make_layout(make_shape(bM, bN)); // (m,n) -> smem_idx
// Define the thread layouts (static)
TiledCopy copyA = make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<TA>, TA>{},
Layout<Shape<_32,_8>,Stride<_8,_1>>{}, // Thr layout 32x8 k-major
Layout<Shape< _1,_1>>{}); // Val layout 1x1
TiledCopy copyB = make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<TB>, TB>{},
Layout<Shape<_32,_8>,Stride<_8,_1>>{}, // Thr layout 32x8 k-major
Layout<Shape< _1,_1>>{}); // Val layout 1x1
TiledMMA mmaC = make_tiled_mma(UniversalFMA<TC,TA,TB>{},
Layout<Shape<_16,_16,_1>>{}); // 16x16x1 TiledMMA
#if 0
print(copyA);
print(copyB);
print(mmaC);
#endif
#if 0
print_latex(copyA);
print_latex(copyB);
print_latex(mmaC);
#endif
dim3 dimBlock(size(mmaC));
dim3 dimGrid(size(ceil_div(M, bM)),
size(ceil_div(N, bN)));
gemm_device<<<dimGrid, dimBlock, 0, stream>>>
(prob_shape, cta_tiler,
A, dA, sA, copyA,
B, dB, sB, copyB,
C, dC, sC, mmaC,
alpha, beta);
}
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm(char transA, char transB, int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
if (transA == 'N' && transB == 'T') {
return gemm_nt(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream);
} else
if (transA == 'T' && transB == 'N') {
return gemm_tn(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream);
}
assert(false && "Not implemented");
}
int main(int argc, char** argv)
{
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (props.major < 8) {
std::cout << "This example requires an Ampere GPU or newer (CC >= 80)" << std::endl;
// Return 0 so tests pass if run on unsupported architectures or CUDA Toolkits.
return 0;
}
int m = 5120;
if (argc >= 2)
sscanf(argv[1], "%d", &m);
int n = 5120;
if (argc >= 3)
sscanf(argv[2], "%d", &n);
int k = 4096;
if (argc >= 4)
sscanf(argv[3], "%d", &k);
char transA = 'N';
if (argc >= 5)
sscanf(argv[4], "%c", &transA);
char transB = 'T';
if (argc >= 6)
sscanf(argv[5], "%c", &transB);
using TA = float;
using TB = float;
using TC = float;
using TI = float;
TI alpha = 1.0;
TI beta = 0.0;
std::cout << "M = " << m << std::endl;
std::cout << "N = " << n << std::endl;
std::cout << "K = " << k << std::endl;
std::cout << "C = A^" << transA << " B^" << transB << std::endl;
thrust::host_vector<TA> h_A(m*k);
thrust::host_vector<TB> h_B(n*k);
thrust::host_vector<TC> h_C(m*n);
for (int j = 0; j < m*k; ++j) h_A[j] = static_cast<TA>( 2*(rand() / double(RAND_MAX)) - 1 );
for (int j = 0; j < n*k; ++j) h_B[j] = static_cast<TB>( 2*(rand() / double(RAND_MAX)) - 1 );
for (int j = 0; j < m*n; ++j) h_C[j] = static_cast<TC>(-1);
thrust::device_vector<TA> d_A = h_A;
thrust::device_vector<TB> d_B = h_B;
thrust::device_vector<TC> d_C = h_C;
double gflops = (2.0*m*n*k) * 1e-9;
const int timing_iterations = 100;
GPU_Clock timer;
int ldA = 0, ldB = 0, ldC = m;
if (transA == 'N') {
ldA = m;
} else if (transA == 'T') {
ldA = k;
} else {
assert(false);
}
if (transB == 'N') {
ldB = k;
} else if (transB == 'T') {
ldB = n;
} else {
assert(false);
}
// Run once
d_C = h_C;
gemm(transA, transB, m, n, k,
alpha,
d_A.data().get(), ldA,
d_B.data().get(), ldB,
beta,
d_C.data().get(), ldC);
CUTE_CHECK_LAST();
thrust::host_vector<TC> cute_result = d_C;
// Timing iterations
timer.start();
for (int i = 0; i < timing_iterations; ++i) {
gemm(transA, transB, m, n, k,
alpha,
d_A.data().get(), ldA,
d_B.data().get(), ldB,
beta,
d_C.data().get(), ldC);
}
double cute_time = timer.seconds() / timing_iterations;
CUTE_CHECK_LAST();
printf("CUTE_GEMM: [%6.1f]GFlop/s (%6.4f)ms\n", gflops / cute_time, cute_time*1000);
return 0;
}
| examples/cute/tutorial/sgemm_sm80.cu/0 | {
"file_path": "examples/cute/tutorial/sgemm_sm80.cu",
"repo_id": "examples",
"token_count": 9859
} | 11 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp>
#include <cute/algorithm/functional.hpp>
#include <cute/tensor_impl.hpp>
#include <cute/atom/mma_atom.hpp>
/** The gemm algorithm takes four (or three) tensors and computes
* D = A * B + C
* It dispatches based on the number of modes each tensor has:
*
* 1. `(V) x (V) => (V)`.
* The element-wise product of vectors. Dispatches to FMA or MMA.
* 2. `(M) x (N) => (M,N)`.
* The outer product of vectors. Dispatches to [3] with new mode K=(1).
* 3. `(M,K) x (N,K) => (M,N)`.
* The product of matrices. Dispatches to [5] with MMA vector-mode V.
* 4. `(V,M) x (V,N) => (V,M,N)`.
* The batched outer product of vectors. Accounts for register reuse and dispatches to [1] for each (m,n).
* 5. `(V,M,K) x (V,N,K) => (V,M,N)`.
* The batched product of matrices. Dispatches to [4] for each (k).
*/
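/* Example (illustrative sketch, not a prescribed usage; the tensor names are assumptions):
 * given thread-local register fragments produced by a ThrMMA partitioning,
 *
 *   Tensor tCrA = thr_mma.partition_fragment_A(sA); // (MMA,MMA_M,MMA_K)
 *   Tensor tCrB = thr_mma.partition_fragment_B(sB); // (MMA,MMA_N,MMA_K)
 *   Tensor tCrC = thr_mma.partition_fragment_C(gC); // (MMA,MMA_M,MMA_N)
 *
 *   gemm(mma_atom, tCrC, tCrA, tCrB, tCrC);  // enters dispatch [5], unrolled over the K mode
 *   gemm(mma_atom, tCrA, tCrB, tCrC);        // equivalent three-argument accumulation form
 */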
namespace cute
{
//
// Three arguments to four
//
template <class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> & C)
{
return gemm(C, A, B, C);
}
template <class MMA,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> & C)
{
return gemm(mma, C, A, B, C);
}
//
// Accept mutable temporaries
//
template <class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> && C)
{
return gemm(C, A, B, C);
}
template <class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(Tensor<TD, DLayout> && D,
Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> const& C)
{
return gemm(D, A, B, C);
}
template <class MMA,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> && C)
{
return gemm(mma, C, A, B, C);
}
template <class MMA,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TD, DLayout> && D,
Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> const& C)
{
return gemm(mma, D, A, B, C);
}
//
// Default MMA is UniversalFMA
//
template <class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(Tensor<TD, DLayout> & D,
Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> const& C)
{
using MMA = MMA_Atom<UniversalFMA<typename Tensor<TD,DLayout>::value_type,
typename Tensor<TA,ALayout>::value_type,
typename Tensor<TB,BLayout>::value_type,
typename Tensor<TC,CLayout>::value_type>>;
return gemm(MMA{}, D, A, B, C);
}
//
// Thread-Local Register-Memory GEMMs
//
// Dispatch [1]: (V) x (V) => (V)
template <class MMA,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout,
__CUTE_REQUIRES(DLayout::rank == 1 && is_rmem<TD>::value &&
ALayout::rank == 1 && is_rmem<TA>::value &&
BLayout::rank == 1 && is_rmem<TB>::value &&
CLayout::rank == 1 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TD, DLayout> & D, // (V) Logical data
Tensor<TA, ALayout> const& A, // (V) Logical data
Tensor<TB, BLayout> const& B, // (V) Logical data
Tensor<TC, CLayout> const& C) // (V) Logical data
{
// No static assertions on (V), MMA checks compatibility
mma.call(D, A, B, C);
}
// Dispatch [2]: (M) x (N) => (M,N)
template <class MMA,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout,
__CUTE_REQUIRES(DLayout::rank == 2 && is_rmem<TD>::value &&
ALayout::rank == 1 && is_rmem<TA>::value &&
BLayout::rank == 1 && is_rmem<TB>::value &&
CLayout::rank == 2 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TD, DLayout> & D, // (M,N) Logical data
Tensor<TA, ALayout> const& A, // (M) Logical data
Tensor<TB, BLayout> const& B, // (N) Logical data
Tensor<TC, CLayout> const& C) // (M,N) Logical data
{
CUTE_STATIC_ASSERT_V(size<0>(A) == size<0>(C)); // AM == CM
CUTE_STATIC_ASSERT_V(size<0>(B) == size<1>(C)); // BN == CN
CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D));
gemm(mma,
D, // (M,N)
make_tensor(A.data(), append<2>(A.layout())), // (M,1)
make_tensor(B.data(), append<2>(B.layout())), // (N,1)
C); // (M,N)
}
// Dispatch [3]: (M,K) x (N,K) => (M,N)
template <class MMA,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout,
__CUTE_REQUIRES(DLayout::rank == 2 && is_rmem<TD>::value &&
ALayout::rank == 2 && is_rmem<TA>::value &&
BLayout::rank == 2 && is_rmem<TB>::value &&
CLayout::rank == 2 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TD, DLayout> & D, // (M,N) Logical data
Tensor<TA, ALayout> const& A, // (M,K) Logical data
Tensor<TB, BLayout> const& B, // (N,K) Logical data
Tensor<TC, CLayout> const& C) // (M,N) Logical data
{
CUTE_STATIC_ASSERT_V(size<0>(A) == size<0>(C)); // AM == CM
CUTE_STATIC_ASSERT_V(size<0>(B) == size<1>(C)); // BN == CN
CUTE_STATIC_ASSERT_V(size<1>(A) == size<1>(B)); // AK == BK
CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D));
// Assert this is a 1-value MMA
CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutC_TV{}) == Int<1>{});
CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutA_TV{}) == Int<1>{});
CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutB_TV{}) == Int<1>{});
gemm(mma,
make_tensor(D.data(), prepend<3>(D.layout())), // (1,M,N)
make_tensor(A.data(), prepend<3>(A.layout())), // (1,M,K)
make_tensor(B.data(), prepend<3>(B.layout())), // (1,N,K)
make_tensor(C.data(), prepend<3>(C.layout()))); // (1,M,N)
}
// Dispatch [4]: (V,M) x (V,N) => (V,M,N)
template <class MMA,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout,
__CUTE_REQUIRES(DLayout::rank == 3 && is_rmem<TD>::value &&
ALayout::rank == 2 && is_rmem<TA>::value &&
BLayout::rank == 2 && is_rmem<TB>::value &&
CLayout::rank == 3 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TD, DLayout> & D, // (V,M,N) Logical data
Tensor<TA, ALayout> const& A, // (V,M) Logical data
Tensor<TB, BLayout> const& B, // (V,N) Logical data
Tensor<TC, CLayout> const& C) // (V,M,N) Logical data
{
CUTE_STATIC_ASSERT_V(size<1>(A) == size<1>(C)); // AM == CM
CUTE_STATIC_ASSERT_V(size<1>(B) == size<2>(C)); // BN == CN
CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D) && size<2>(C) == size<2>(D));
auto M = size<1>(A);
auto N = size<1>(B);
// REGISTER .reuse OPTIMIZATIONS
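  // A serpentine (boustrophedon) path reverses the inner-loop direction on alternate outer
  // iterations so the operand fragment indexed by the outer loop stays resident in registers
  // between consecutive MMA issues.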
// 64-bit traversal specialization -- serpentine path
if constexpr (decltype(size<0>(A))::value * sizeof(typename TA::value_type) == 8 &&
decltype(size<0>(B))::value * sizeof(typename TB::value_type) == 8)
{
#if 1 // NOTE: Row- vs Col- major could depend on the C-matrix order... (which we can test)
// Row-major serpentine iteration
CUTE_UNROLL
for (int m = 0; m < M; ++m) {
CUTE_UNROLL
for (int n = 0; n < N; ++n) {
int ns = (m & 1) ? N-1-n : n; // Serpentine coordinate
gemm(mma, D(_,m,ns), A(_,m), B(_,ns), C(_,m,ns));
}
}
#else
// Col-major serpentine iteration
CUTE_UNROLL
for (int n = 0; n < N; ++n) {
CUTE_UNROLL
for (int m = 0; m < M; ++m) {
int ms = (n & 1) ? M-1-m : m; // Serpentine coordinate
gemm(mma, D(_,ms,n), A(_,ms), B(_,n), C(_,ms,n));
}
}
#endif
} else
// 32-bit traversal specialization -- kinked serpentine path
if constexpr (decltype(size<0>(A))::value * sizeof(typename TA::value_type) == 4 &&
decltype(size<0>(B))::value * sizeof(typename TB::value_type) == 4)
{
#if 1 // NOTE: Row- vs Col- major could depend on the C-matrix order... (which we can test)
// Row-major kinked serpentine iteration
CUTE_UNROLL
for (int m = 0; m < M; m += 2) {
CUTE_UNROLL
for (int n = 0; n < N; ++n) {
int ns = (m & 2) ? N-1-n : n;
gemm(mma, D(_,m+0,ns), A(_,m+0), B(_,ns), C(_,m+0,ns));
if (m+1 < M) {
gemm(mma, D(_,m+1,ns), A(_,m+1), B(_,ns), C(_,m+1,ns));
}
}
}
#else
// Col-major kinked serpentine iteration
CUTE_UNROLL
for (int n = 0; n < N; n += 2) {
CUTE_UNROLL
for (int m = 0; m < M; ++m) {
// Kinked serpentine traversal for maximum register reuse
int ms = (n & 2) ? M-1-m : m;
gemm(mma, D(_,ms,n+0), A(_,ms), B(_,n+0), C(_,ms,n+0));
if (n+1 < N) {
gemm(mma, D(_,ms,n+1), A(_,ms), B(_,n+1), C(_,ms,n+1));
}
}
}
#endif
} else
// 64-bit + 32-bit traversal order -- keep A (64-bit) in the outer loop and serpentine B
if constexpr (decltype(size<0>(A))::value * sizeof(typename TA::value_type) == 8 &&
decltype(size<0>(B))::value * sizeof(typename TB::value_type) == 4) {
// Row-major serpentine iteration
CUTE_UNROLL
for (int m = 0; m < M; ++m) {
CUTE_UNROLL
for (int n = 0; n < N; ++n) {
int ns = (m & 1) ? N-1-n : n; // Serpentine coordinate
gemm(mma, D(_,m,ns), A(_,m), B(_,ns), C(_,m,ns));
}
}
} else
// 32-bit + 64-bit traversal order -- keep B (64-bit) in the outer loop and serpentine A
if constexpr (decltype(size<0>(A))::value * sizeof(typename TA::value_type) == 4 &&
decltype(size<0>(B))::value * sizeof(typename TB::value_type) == 8) {
// Col-major serpentine iteration
CUTE_UNROLL
for (int n = 0; n < N; ++n) {
CUTE_UNROLL
for (int m = 0; m < M; ++m) {
int ms = (n & 1) ? M-1-m : m; // Serpentine coordinate
gemm(mma, D(_,ms,n), A(_,ms), B(_,n), C(_,ms,n));
}
}
} else
// Fallback to serpentine loop
{
// Col-major serpentine iteration
CUTE_UNROLL
for (int n = 0; n < N; ++n) {
CUTE_UNROLL
for (int m = 0; m < M; ++m) {
int ms = (n & 1) ? M-1-m : m; // Serpentine coordinate
gemm(mma, D(_,ms,n), A(_,ms), B(_,n), C(_,ms,n));
}
}
}
}
// Dispatch [5]: (V,M,K) x (V,N,K) => (V,M,N)
template <class MMA,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout,
__CUTE_REQUIRES(DLayout::rank == 3 && is_rmem<TD>::value &&
ALayout::rank == 3 && is_rmem<TA>::value &&
BLayout::rank == 3 && is_rmem<TB>::value &&
CLayout::rank == 3 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TD, DLayout> & D, // (V,M,N) Logical data
Tensor<TA, ALayout> const& A, // (V,M,K) Logical data
Tensor<TB, BLayout> const& B, // (V,N,K) Logical data
Tensor<TC, CLayout> const& C) // (V,M,N) Logical data
{
CUTE_STATIC_ASSERT_V(size<1>(A) == size<1>(C)); // AM == CM
CUTE_STATIC_ASSERT_V(size<1>(B) == size<2>(C)); // BN == CN
CUTE_STATIC_ASSERT_V(size<2>(A) == size<2>(B)); // AK == BK
CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D) && size<2>(C) == size<2>(D));
auto K = size<2>(A);
CUTE_UNROLL
for (int k = 0; k < K; ++k) {
gemm(mma, D, A(_,_,k), B(_,_,k), C);
}
}
//
// Thread-Local Shared-Memory GEMMs
//
// Of the dispatch shapes above, only [3] and [5] have shared-memory overloads:
// [3] reshapes its operands and forwards to [5], and [5] stages the smem operands into
// register fragments before dispatching to the register-level GEMMs above.
// Dispatch [3]: (M,K) x (N,K) => (M,N)
template <class MMA,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout,
__CUTE_REQUIRES(DLayout::rank == 2 && is_rmem<TD>::value &&
ALayout::rank == 2 && is_smem<TA>::value &&
BLayout::rank == 2 && is_smem<TB>::value &&
CLayout::rank == 2 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TD, DLayout> & D, // (M,N) Logical data
Tensor<TA, ALayout> const& A, // (M,K) Logical data
Tensor<TB, BLayout> const& B, // (N,K) Logical data
Tensor<TC, CLayout> const& C) // (M,N) Logical data
{
CUTE_STATIC_ASSERT_V(size<0>(A) == size<0>(C)); // AM == CM
CUTE_STATIC_ASSERT_V(size<0>(B) == size<1>(C)); // BN == CN
CUTE_STATIC_ASSERT_V(size<1>(A) == size<1>(B)); // AK == BK
CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D));
// Assert this is a 1-value MMA
CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutC_TV{}) == Int<1>{});
CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutA_TV{}) == Int<1>{});
CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutB_TV{}) == Int<1>{});
gemm(mma,
make_tensor(D.data(), prepend<3>(D.layout())), // (1,M,N)
make_tensor(A.data(), prepend<3>(A.layout())), // (1,M,K)
make_tensor(B.data(), prepend<3>(B.layout())), // (1,N,K)
make_tensor(C.data(), prepend<3>(C.layout()))); // (1,M,N)
}
// Dispatch [5]: (V,M,K) x (V,N,K) => (V,M,N)
template <class MMA,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout,
__CUTE_REQUIRES(DLayout::rank == 3 && is_rmem<TD>::value &&
ALayout::rank == 3 && is_smem<TA>::value &&
BLayout::rank == 3 && is_smem<TB>::value &&
CLayout::rank == 3 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TD, DLayout> & D, // (V,M,N) Logical data
Tensor<TA, ALayout> const& A, // (V,M,K) Logical data
Tensor<TB, BLayout> const& B, // (V,N,K) Logical data
Tensor<TC, CLayout> const& C) // (V,M,N) Logical data
{
CUTE_STATIC_ASSERT_V(size<1>(A) == size<1>(C)); // AM == CM
CUTE_STATIC_ASSERT_V(size<1>(B) == size<2>(C)); // BN == CN
CUTE_STATIC_ASSERT_V(size<2>(A) == size<2>(B)); // AK == BK
CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D) && size<2>(C) == size<2>(D));
auto rA = MMA_Atom<MMA>::make_fragment_A(A);
auto rB = MMA_Atom<MMA>::make_fragment_B(B);
auto K = size<2>(A);
CUTE_UNROLL
for (int k = 0; k < K; ++k)
{
copy(A(_,_,k), rA(_,_,k));
copy(B(_,_,k), rB(_,_,k));
// Thread-level register gemm for k
gemm(mma, D, rA(_,_,k), rB(_,_,k), C);
}
}
} // end namespace cute
| include/cute/algorithm/gemm.hpp/0 | {
"file_path": "include/cute/algorithm/gemm.hpp",
"repo_id": "include",
"token_count": 9057
} | 12 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/arch/mma.hpp>
#include <cute/tensor_impl.hpp>
namespace cute
{
namespace detail {
template <class X, class = void>
struct supports_output_scaling { static constexpr bool value = false; };
template <class X>
struct supports_output_scaling<X, void_t<decltype(declval<X>().accumulate_)>> { static constexpr bool value = true; };
} // end namespace detail
/**
* concept MMA_Traits
* {
 * using ValTypeD = // Logical D-value type
 * using ValTypeA = // Logical A-value type
 * using ValTypeB = // Logical B-value type
 * using ValTypeC = // Logical C-value type (typically assumed == ValTypeD)
 *
 * using FrgTypeA = // A-type consumed by MMA (if omitted, same as ValTypeA)
 * using FrgTypeB = // B-type consumed by MMA (if omitted, same as ValTypeB)
 * using FrgTypeC = // C-type consumed by MMA (if omitted, same as ValTypeC)
*
* using Shape_MNK = // Logical MxNxK shape of the MMA
*
* using ThrID = // Logical thread id (tid) -> tidx
*
* using ALayout = // (Logical thread id (tid), Logical value id (vid)) -> Flat MK-coord
* using BLayout = // (Logical thread id (tid), Logical value id (vid)) -> Flat NK-coord
* using CLayout = // (Logical thread id (tid), Logical value id (vid)) -> Flat MN-coord
* };
*/
template <class MMAOperation, class... MMAOpArgs>
struct MMA_Traits
{
static_assert(sizeof(MMAOperation) == 0, "MMA_Traits not implemented for this MMA_Operation.");
};
template <class D, class A, class B, class C>
struct MMA_Traits<UniversalFMA<D,A,B,C>>
{
using ValTypeD = D;
using ValTypeA = A;
using ValTypeB = B;
using ValTypeC = C;
// Logical shape of the MMA
using Shape_MNK = Shape<_1,_1,_1>;
// Logical thread id (tid) -> tidx
using ThrID = Layout<_1>;
// (Logical thread id (tid), Logical value id (vid)) -> coord
// (tid,vid) -> (m,k)
using ALayout = Layout<Shape<_1,_1>>;
// (tid,vid) -> (n,k)
using BLayout = Layout<Shape<_1,_1>>;
// (tid,vid) -> (m,n)
using CLayout = Layout<Shape<_1,_1>>;
};
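// Example (illustrative): the UniversalFMA traits above describe a single-thread 1x1x1 MMA.
//   using Traits = MMA_Traits<UniversalFMA<float, float, float, float>>;
//   static_assert(is_same<typename Traits::Shape_MNK, Shape<_1,_1,_1>>::value, "1x1x1 tile");
//   static_assert(decltype(size(typename Traits::ThrID{}))::value == 1, "single thread");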
//
// Generic mma_unpack for any MMA_Traits
//
template <class MMA_Op, class... MMA_Args,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE constexpr
void
mma_unpack(MMA_Traits<MMA_Op, MMA_Args...> const& traits,
Tensor<TD, DLayout> & D,
Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> const& C)
{
static_assert(is_rmem<TD>::value, "Expected registers in MMA_Atom::call");
static_assert(is_rmem<TA>::value, "Expected registers in MMA_Atom::call");
static_assert(is_rmem<TB>::value, "Expected registers in MMA_Atom::call");
static_assert(is_rmem<TC>::value, "Expected registers in MMA_Atom::call");
// Register value types from the MMA_Operation register arrays
using RegTypeD = typename remove_extent<typename MMA_Op::DRegisters>::type;
using RegTypeA = typename remove_extent<typename MMA_Op::ARegisters>::type;
using RegTypeB = typename remove_extent<typename MMA_Op::BRegisters>::type;
using RegTypeC = typename remove_extent<typename MMA_Op::CRegisters>::type;
using MMATraits = MMA_Traits<MMA_Op, MMA_Args...>;
[[maybe_unused]] constexpr int RegNumD = extent<typename MMA_Op::DRegisters>::value;
constexpr int RegNumA = extent<typename MMA_Op::ARegisters>::value;
constexpr int RegNumB = extent<typename MMA_Op::BRegisters>::value;
constexpr int RegNumC = extent<typename MMA_Op::CRegisters>::value;
Tensor rA = recast<RegTypeA>(A);
Tensor rB = recast<RegTypeB>(B);
CUTE_STATIC_ASSERT_V(size(rA) == Int<RegNumA>{});
CUTE_STATIC_ASSERT_V(size(rB) == Int<RegNumB>{});
if constexpr (is_same<RegTypeD, void>::value)
{
static_assert(is_same<typename TD::value_type, typename TC::value_type>::value, "GMMA C and D value_type must match.");
static_assert(is_same<DLayout, CLayout>::value, "GMMA C and D layouts must match.");
// assert((void*)&C == (void*)&D);
Tensor rC = recast<RegTypeC>(D); // NOTE: D and C are same, so use mutable D
//CUTE_STATIC_ASSERT_V(size(rC) == Int<RegNumC>{});
if constexpr (detail::supports_output_scaling<MMATraits>::value) {
detail::explode(MMA_Op::fma,
rA, make_int_sequence<RegNumA>{},
rB, make_int_sequence<RegNumB>{},
rC, make_int_sequence<RegNumC>{},
&(traits.accumulate_), seq<0>{});
}
else {
detail::explode(MMA_Op::fma,
rA, make_int_sequence<RegNumA>{},
rB, make_int_sequence<RegNumB>{},
rC, make_int_sequence<RegNumC>{});
}
}
else {
Tensor rD = recast<RegTypeD>(D);
Tensor rC = recast<RegTypeC>(C);
CUTE_STATIC_ASSERT_V(size(rD) == Int<RegNumD>{});
CUTE_STATIC_ASSERT_V(size(rC) == Int<RegNumC>{});
if constexpr (detail::supports_output_scaling<MMATraits>::value) {
detail::explode(MMA_Op::fma,
rD, make_int_sequence<RegNumD>{},
rA, make_int_sequence<RegNumA>{},
rB, make_int_sequence<RegNumB>{},
rC, make_int_sequence<RegNumC>{},
&(traits.accumulate_), seq<0>{});
}
else {
detail::explode(MMA_Op::fma,
rD, make_int_sequence<RegNumD>{},
rA, make_int_sequence<RegNumA>{},
rB, make_int_sequence<RegNumB>{},
rC, make_int_sequence<RegNumC>{});
}
}
}
//
// Accept mutable temporaries
//
template <class MMA_Op, class... MMA_Args,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE constexpr
void
mma_unpack(MMA_Traits<MMA_Op, MMA_Args...> const& traits,
Tensor<TD, DLayout> && D,
Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> const& C)
{
mma_unpack(traits, D, A, B, C);
}
namespace detail {
template <class X, class = void>
struct FrgTypeA_or_Default { using type = typename X::ValTypeA; };
template <class X>
struct FrgTypeA_or_Default<X,void_t<typename X::FrgTypeA>> { using type = typename X::FrgTypeA; };
template <class X, class = void>
struct FrgTypeB_or_Default { using type = typename X::ValTypeB; };
template <class X>
struct FrgTypeB_or_Default<X,void_t<typename X::FrgTypeB>> { using type = typename X::FrgTypeB; };
template <class X, class = void>
struct FrgTypeC_or_Default { using type = typename X::ValTypeC; };
template <class X>
struct FrgTypeC_or_Default<X,void_t<typename X::FrgTypeC>> { using type = typename X::FrgTypeC; };
} // end namespace detail
} // namespace cute
| include/cute/atom/mma_traits.hpp/0 | {
"file_path": "include/cute/atom/mma_traits.hpp",
"repo_id": "include",
"token_count": 3563
} | 13 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp> // iterator_traits
#include <cute/container/array_subbyte.hpp>
#include <cute/pointer_base.hpp>
#include <cute/swizzle.hpp>
/* This implements a swizzle pointer of the form
* InvolutionFn o PtrAdd
* where the InvolutionFn need not be linear.
*
* This differs subtly from swizzle_layout because the smem pointer is used
* as the offset. That means that swizzle_layout will implement position-independent
* swizzle layouts, while swizzle_ptr implements position-dependent swizzle tensors.
* Arch chose to design hardware with position-dependent swizzles.
*
* For clarity:
* NormalLayout : DeRef <- PtrAdd <- [Layout]
* ComposedLayout: DeRef <- PtrAdd <- [Swizzle <- OffsetAdd <- Layout]
* SwizzlePtr : [DeRef <- Swizzle <- PtrAdd] <- Layout
*
* Furthermore, for known swizzles, this pointer attempts to decay itself
* to a normal-pointer with a new layout containing dynamic or static strides.
* This is possible by determining the subdomain of the InvolutionFn
* that is identity and testing if the Layout's codomain is contained
* within it.
*/
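/* Example (illustrative sketch; `smem` is an assumed __shared__ float buffer):
 *
 *   auto sptr = cute::make_swizzle_ptr(cute::make_smem_ptr(smem), cute::Swizzle<3,3,3>{});
 *   auto sA   = cute::make_tensor(sptr, cute::make_layout(cute::make_shape(cute::_128{}, cute::_8{})));
 *
 * Each access sA(m,k) applies the swizzle to the address itself (position-dependent),
 * in contrast to composing the swizzle into the layout (position-independent, see swizzle_layout).
 */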
namespace cute
{
// concept SwizzleFn {
// CUTE_HOST_DEVICE constexpr static uint apply(uint);
// }
// See Swizzle<B,M,S> in swizzle.hpp for common swizzle-functions.
template <class SwizzleFn, class Iterator>
struct swizzle_ptr : iter_adaptor<Iterator,swizzle_ptr<SwizzleFn,Iterator>>
{
using iterator = Iterator;
using reference = typename iterator_traits<iterator>::reference;
using element_type = typename iterator_traits<iterator>::element_type;
using value_type = typename iterator_traits<iterator>::value_type;
using iter_adaptor<Iterator,swizzle_ptr<SwizzleFn,Iterator>>::iter_adaptor;
template <class Iter>
CUTE_HOST_DEVICE constexpr static
Iter apply_swizzle(Iter ptr) {
return {apply_swizzle(ptr.get())};
}
template <class T>
CUTE_HOST_DEVICE constexpr static
T* apply_swizzle(T* ptr) {
return reinterpret_cast<T*>(SwizzleFn::apply(reinterpret_cast<uintptr_t>(ptr)));
}
template <class T>
CUTE_HOST_DEVICE constexpr static
subbyte_iterator<T> apply_swizzle(subbyte_iterator<T> ptr) {
return {apply_swizzle(ptr.ptr_), ptr.idx_};
}
CUTE_HOST_DEVICE constexpr
reference operator*() const {
return *apply_swizzle(this->get());
}
template <class Int>
CUTE_HOST_DEVICE constexpr
reference operator[](Int const& i) const {
return *apply_swizzle(this->get() + i);
}
};
template <class T, class = void> // Default No-Swizzle
struct get_swizzle { using type = Swizzle<0,4,3>; };
template <class SwizzleFn, class P> // Found the SwizzleFn
struct get_swizzle<swizzle_ptr<SwizzleFn,P>> { using type = SwizzleFn; };
template <class T> // Recurse into anything with a ::iterator
struct get_swizzle<T, void_t<typename T::iterator>> : get_swizzle<typename T::iterator> {};
template <class Iter>
using get_swizzle_t = typename get_swizzle<Iter>::type;
template <class Iterator, class SwizzleFn>
CUTE_HOST_DEVICE constexpr
swizzle_ptr<SwizzleFn,Iterator>
make_swizzle_ptr(Iterator ptr, SwizzleFn) {
return {ptr};
}
// Swizzle-0 specialization for immediate decay
template <class Iterator, int M, int S>
CUTE_HOST_DEVICE constexpr
Iterator
make_swizzle_ptr(Iterator ptr, Swizzle<0,M,S>) {
return ptr;
}
//
// Recast
//
template <class SwizzleFn, class P>
CUTE_HOST_DEVICE constexpr
auto
raw_pointer_cast(swizzle_ptr<SwizzleFn,P> const& ptr) {
return raw_pointer_cast(ptr.get());
}
// SwizzleFn operates on the pointer address, so it doesn't care about the type
template <class NewT, class SwizzleFn, class P>
CUTE_HOST_DEVICE constexpr
auto
recast_ptr(swizzle_ptr<SwizzleFn,P> const& ptr) {
return make_swizzle_ptr(recast_ptr<NewT>(ptr.get()), SwizzleFn{});
}
//
// Display utilities
//
template <class SwizzleFn, class P>
CUTE_HOST_DEVICE void print(swizzle_ptr<SwizzleFn,P> ptr)
{
print(SwizzleFn{}); printf("_"); print(ptr.get());
}
#if !defined(__CUDACC_RTC__)
template <class SwizzleFn, class P>
CUTE_HOST std::ostream& operator<<(std::ostream& os, swizzle_ptr<SwizzleFn,P> ptr)
{
return os << SwizzleFn{} << "_" << ptr.get();
}
#endif
} // end namespace cute
| include/cute/pointer_swizzle.hpp/0 | {
"file_path": "include/cute/pointer_swizzle.hpp",
"repo_id": "include",
"token_count": 2016
} | 14 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Architecture-specific operators on memory added for SM75
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cute/arch/copy_sm75.hpp"
#include "cute/arch/util.hpp"
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Layout of destination matrix (column-major implies transpose)
typename Layout,
/// .x1, .x2, or .x4
int MatrixCount
>
inline __device__ void ldsm(Array<unsigned, MatrixCount> & D, void const* ptr);
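// Example (illustrative): a warp cooperatively loads four 8x8 b16 matrices from shared memory;
// `row_ptr` is an assumed per-thread pointer to the row this thread contributes.
//   Array<unsigned, 4> frag;
//   ldsm<layout::RowMajor, 4>(frag, row_ptr);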
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Determine the appropriate way to target PTX's "ldmatrix" instruction.
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// CUTLASS helper to get SMEM pointer
inline __device__ unsigned cutlass_get_smem_pointer(void *ptr) {
return cute::cast_smem_ptr_to_uint(ptr);
}
/// CUTLASS helper to get SMEM pointer
inline __device__ unsigned cutlass_get_smem_pointer(void const *ptr) {
return cutlass_get_smem_pointer(const_cast<void *>(ptr));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::RowMajor, 1>(
Array<unsigned, 1> & D,
void const* ptr) {
#if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x;
asm volatile ("ldmatrix.sync.aligned.x1.m8n8.shared.b16 {%0}, [%1];" : "=r"(x) : "r"(addr));
reinterpret_cast<int &>(D) = x;
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::RowMajor, 2>(
Array<unsigned, 2> & D,
void const* ptr) {
#if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x, y;
asm volatile ("ldmatrix.sync.aligned.x2.m8n8.shared.b16 {%0, %1}, [%2];" : "=r"(x), "=r"(y) : "r"(addr));
reinterpret_cast<int2 &>(D) = make_int2(x, y);
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::RowMajor, 4>(
Array<unsigned, 4> & D,
void const* ptr) {
#if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x, y, z, w;
asm volatile ("ldmatrix.sync.aligned.x4.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];" : "=r"(x), "=r"(y), "=r"(z), "=r"(w) : "r"(addr));
reinterpret_cast<int4 &>(D) = make_int4(x, y, z, w);
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Transpose on 16b granularity
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::ColumnMajor, 1>(
Array<unsigned, 1> & D,
void const* ptr) {
#if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x;
asm volatile ("ldmatrix.sync.aligned.x1.trans.m8n8.shared.b16 {%0}, [%1];" : "=r"(x) : "r"(addr));
reinterpret_cast<int &>(D) = x;
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::ColumnMajor, 2>(
Array<unsigned, 2> & D,
void const* ptr) {
#if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x, y;
asm volatile ("ldmatrix.sync.aligned.x2.trans.m8n8.shared.b16 {%0, %1}, [%2];" : "=r"(x), "=r"(y) : "r"(addr));
reinterpret_cast<int2 &>(D) = make_int2(x, y);
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::ColumnMajor, 4>(
Array<unsigned, 4> & D,
void const* ptr) {
#if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x, y, z, w;
asm volatile ("ldmatrix.sync.aligned.x4.trans.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];" : "=r"(x), "=r"(y), "=r"(z), "=r"(w) : "r"(addr));
reinterpret_cast<int4 &>(D) = make_int4(x, y, z, w);
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename AccessType, int Bytes>
struct shared_load_op {
CUTLASS_DEVICE
shared_load_op(AccessType &D, void const *ptr) {
D = *reinterpret_cast<AccessType const *>(ptr);
}
};
template <typename AccessType>
CUTLASS_DEVICE void shared_load(AccessType &D, void const *ptr) {
shared_load_op<AccessType, int(sizeof(AccessType))>(D, ptr);
}
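// Example (illustrative): a 16-byte fragment takes the ld.shared.v4.b32 specialization below;
// `smem_ptr` is an assumed shared-memory address.
//   AlignedArray<float, 4> frag;
//   shared_load(frag, smem_ptr);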
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename AccessType>
struct shared_load_op<AccessType, 16> {
CUTLASS_DEVICE
shared_load_op(AccessType &D, void const *ptr) {
unsigned addr = cutlass_get_smem_pointer(ptr);
uint4 v;
asm volatile ("ld.shared.v4.b32 {%0, %1, %2, %3}, [%4];" :
"=r"(v.x), "=r"(v.y), "=r"(v.z), "=r"(v.w) : "r"(addr));
D = reinterpret_cast<AccessType const &>(v);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename AccessType>
struct shared_load_op<AccessType, 8> {
CUTLASS_DEVICE
shared_load_op(AccessType &D, void const *ptr) {
unsigned addr = cutlass_get_smem_pointer(ptr);
uint2 v;
asm volatile ("ld.shared.v2.b32 {%0, %1}, [%2];" :
"=r"(v.x), "=r"(v.y) : "r"(addr));
D = reinterpret_cast<AccessType const &>(v);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
| include/cutlass/arch/memory_sm75.h/0 | {
"file_path": "include/cutlass/arch/memory_sm75.h",
"repo_id": "include",
"token_count": 2780
} | 15 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/layout/tensor.h"
#include "cutlass/arch/mma.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/dispatch_policy.hpp"
#include "cutlass/detail/layout.hpp"
#include "cutlass/gemm/collective/builders/sm90_common.inl"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::conv::collective::detail {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Maps a rank-1 cute::Shape<> representing the cluster shape onto the IM2COL TMA atom that should be used with it
template <class UnimodalClusterShape>
constexpr auto
sm90_cluster_shape_to_im2col_tma_atom(UnimodalClusterShape unimodal_cluster_shape) {
static_assert(cute::rank(unimodal_cluster_shape) == 1,
"Use this function to figure out TMA for each mode individually.");
if constexpr (cute::size(unimodal_cluster_shape) == 1) {
return cute::SM90_TMA_LOAD_IM2COL{};
}
else {
return cute::SM90_TMA_LOAD_IM2COL_MULTICAST{};
}
}
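// Example (illustrative): for a cluster shape of (1,2,1), the N-mode selects the multicast atom.
//   auto atom_n = sm90_cluster_shape_to_im2col_tma_atom(cute::size<1>(cute::Shape<cute::_1,cute::_2,cute::_1>{}));
//   // decltype(atom_n) == cute::SM90_TMA_LOAD_IM2COL_MULTICAST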
// Collective tile traits struct that serves as a type list containing a tensor's gmem/smem copy atoms and smem layout for the collective mainloop
template<
class GmemTiledCopy_,
class SmemLayout_,
class SmemCopyAtom_ = void
>
struct Sm90ImplicitGemmTileTraits {
using GmemTiledCopy = GmemTiledCopy_;
using SmemLayout = SmemLayout_;
using SmemCopyAtom = SmemCopyAtom_;
};
// Accepts a cutlass::layout::Tensor tag and computes the corresponding spatial dimension count
template <class GmemLayoutTagA, class GmemLayoutTagB>
constexpr int
gmem_layout_tags_to_spatial_dims() {
static_assert(cute::is_same_v<GmemLayoutTagA, GmemLayoutTagB>);
if constexpr (cute::is_same_v<GmemLayoutTagA, cutlass::layout::TensorNWC>) {
return 1;
}
else if constexpr (cute::is_same_v<GmemLayoutTagA, cutlass::layout::TensorNHWC>) {
return 2;
}
else if constexpr (cute::is_same_v<GmemLayoutTagA, cutlass::layout::TensorNDHWC>) {
return 3;
}
else {
static_assert(cutlass::detail::dependent_false<GmemLayoutTagA>);
}
}
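// Example (illustrative): TensorNHWC activation/filter layouts imply a 2-D (conv2d) problem.
//   static_assert(gmem_layout_tags_to_spatial_dims<cutlass::layout::TensorNHWC,
//                                                  cutlass::layout::TensorNHWC>() == 2, "");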
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::conv::collective::detail
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/collective/builders/sm90_common.inl/0 | {
"file_path": "include/cutlass/conv/collective/builders/sm90_common.inl",
"repo_id": "include",
"token_count": 1192
} | 16 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
    Default kernel-level implicit GEMM convolution definitions that combine a threadblock-scoped
    matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/conv/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h"
#include "cutlass/epilogue/threadblock/default_epilogue_with_reduction.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/threadblock/conv2d_tile_iterator.h"
#include "cutlass/conv/threadblock/implicit_gemm_pipelined.h"
#include "cutlass/conv/threadblock/implicit_gemm_multistage.h"
#include "cutlass/conv/threadblock/implicit_gemm_fprop_fusion_multistage.h"
#include "cutlass/conv/threadblock/implicit_gemm_wgrad_fusion_multistage.h"
#include "cutlass/conv/kernel/implicit_gemm_convolution.h"
#include "cutlass/conv/kernel/implicit_gemm_convolution_fusion.h"
#include "cutlass/conv/kernel/implicit_gemm_convolution_strided_dgrad.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename ArchTag,
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename OutputOp
>
struct DefaultConvEpilogue {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp,
OutputOp::kCount
>::Epilogue;
};
template <
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename OutputOp
>
struct DefaultConvEpilogue<
arch::Sm70,
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp
> {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp,
OutputOp::kCount
>::Epilogue;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ArchTag,
typename Shape,
typename WarpMmaSimt,
typename ElementOutput,
typename ElementTensor,
typename ElementVector,
typename OutputOp,
int ElementsPerAccess,
typename PermuteDLayout = layout::NoPermute,
conv::StrideSupport StrideSupport = conv::StrideSupport::kUnity,
int Rank = 4
>
struct DefaultConvEpilogueWithBroadcastSimt {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithBroadcastSimt<
Shape,
WarpMmaSimt,
ElementOutput,
ElementTensor,
ElementVector,
OutputOp,
ElementsPerAccess,
false,
PermuteDLayout,
StrideSupport,
Rank
>::Epilogue;
};
template <
typename ArchTag,
typename Shape,
typename WarpMmaSimt,
typename ElementOutput,
typename ElementTensor,
typename ElementVector,
typename OutputOp,
int ElementsPerAccess
>
struct DefaultConvEpilogueWithBroadcastSimtStridedDgrad {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithBroadcastSimtStridedDgrad<
Shape,
WarpMmaSimt,
ElementOutput,
ElementTensor,
ElementVector,
OutputOp,
ElementsPerAccess
>::Epilogue;
};
template <
typename ArchTag,
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename ElementOutput,
typename ElementTensor,
typename ElementVector,
typename OutputOp,
int ElementsPerAccess
>
struct DefaultConvEpilogueWithBroadcastTensorOp {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithBroadcastTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
ElementTensor,
ElementVector,
OutputOp,
ElementsPerAccess
>::Epilogue;
};
template <
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename ElementOutput,
typename ElementTensor,
typename ElementVector,
typename OutputOp,
int ElementsPerAccess
>
struct DefaultConvEpilogueWithBroadcastTensorOp<
arch::Sm70,
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
ElementTensor,
ElementVector,
OutputOp,
ElementsPerAccess
> {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithBroadcastVoltaTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
ElementTensor,
ElementVector,
OutputOp,
ElementsPerAccess
>::Epilogue;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ArchTag,
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename ElementOutput,
typename OutputOp,
typename ReductionOp,
int ElementsPerAccess
>
struct DefaultConvEpilogueWithReductionTensorOp {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
ElementsPerAccess
>::Epilogue;
};
template <
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename ElementOutput,
typename OutputOp,
typename ReductionOp,
int ElementsPerAccess
>
struct DefaultConvEpilogueWithReductionTensorOp<
arch::Sm70,
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
ElementsPerAccess
> {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithReductionVoltaTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
ElementsPerAccess
>::Epilogue;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Defaults for strided Dgrad
template <
typename ArchTag,
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename OutputOp
>
struct DefaultConvEpilogueStridedDgrad {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOpStridedDgrad<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp,
OutputOp::kCount
>::Epilogue;
};
template <
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename OutputOp
>
struct DefaultConvEpilogueStridedDgrad<
arch::Sm70,
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp
> {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueVoltaTensorOpStridedDgrad<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp,
OutputOp::kCount
>::Epilogue;
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/kernel/default_conv2d.h/0 | {
"file_path": "include/cutlass/conv/kernel/default_conv2d.h",
"repo_id": "include",
"token_count": 2832
} | 17 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
    Defines a Deconv2d kernel with an epilogue broadcast, built on the existing DefaultDeconv2d kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_deconv2d.h"
#include "cutlass/conv/kernel/implicit_gemm_convolution_with_fused_epilogue.h"
#include "cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h"
#include "cutlass/epilogue/threadblock/epilogue_with_broadcast.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized,
conv::StrideSupport StrideSupport = StrideSupport::kStrided,
/// Access granularity of A matrix in units of elements
int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value,
/// Access granularity of B matrix in units of elements
int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value
>
struct DefaultDeconv2dWithBroadcast {
using ImplicitGemmBase = typename DefaultDeconv2d<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm,
StrideSupport,
AlignmentA,
AlignmentB
>::Kernel;
// Define epilogue
using Epilogue = typename cutlass::conv::kernel::detail::DefaultConvEpilogueWithBroadcastTensorOp<
ArchTag,
typename ImplicitGemmBase::Epilogue::Shape,
typename ImplicitGemmBase::Epilogue::WarpMmaOperator,
ImplicitGemmBase::Epilogue::kPartitionsK,
ElementC,
typename EpilogueOutputOp::ElementT,
typename EpilogueOutputOp::ElementVector,
EpilogueOutputOp,
ImplicitGemmBase::Epilogue::kElementsPerAccess
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionWithFusedEpilogue<
typename ImplicitGemmBase::Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
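// Illustrative sketch (not part of this header): composing a deconv2d-with-broadcast kernel from
// the default above. The element types, tile shapes and swizzle below are assumptions chosen for
// illustration only, and `EpilogueOutputOpWithBroadcast` stands in for any epilogue functor that
// exposes ElementT and ElementVector (as required by DefaultConvEpilogueWithBroadcastTensorOp).
//
//   using DeconvKernel = cutlass::conv::kernel::DefaultDeconv2dWithBroadcast<
//     cutlass::half_t, cutlass::layout::TensorNHWC,            // A: output gradient
//     cutlass::half_t, cutlass::layout::TensorNHWC,            // B: filter
//     cutlass::half_t, cutlass::layout::TensorNHWC,            // C / D
//     float,                                                   // accumulator
//     cutlass::arch::OpClassTensorOp,
//     cutlass::arch::Sm80,
//     cutlass::gemm::GemmShape<128, 128, 32>,                  // threadblock tile
//     cutlass::gemm::GemmShape<64, 64, 32>,                    // warp tile
//     cutlass::gemm::GemmShape<16, 8, 16>,                     // instruction shape
//     EpilogueOutputOpWithBroadcast,                           // assumed user-provided functor
//     cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//     3,                                                       // stages
//     cutlass::arch::OpMultiplyAdd
//   >::Kernel;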
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassSimt convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Deconv2d specialization,
/// multi-stage pipeline, and FFMA-based mainloop for SM80
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2dWithBroadcast <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm,
conv::StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
using ImplicitGemmBase = typename DefaultDeconv2d<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm,
conv::StrideSupport::kUnity,
AlignmentA,
AlignmentB
>::Kernel;
// Define epilogue
using Epilogue = typename cutlass::conv::kernel::detail::DefaultConvEpilogueWithBroadcastSimt<
ArchTag,
typename ImplicitGemmBase::Epilogue::Shape,
typename ImplicitGemmBase::Epilogue::WarpMmaOperator,
ElementC,
typename EpilogueOutputOp::ElementT,
typename EpilogueOutputOp::ElementVector,
EpilogueOutputOp,
ImplicitGemmBase::Epilogue::kElementsPerAccess
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionWithFusedEpilogue<
typename ImplicitGemmBase::Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2dWithBroadcast <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm,
conv::StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
using ImplicitGemmBase = typename DefaultDeconv2d<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm,
conv::StrideSupport::kStrided,
AlignmentA,
AlignmentB
>::Kernel;
// Define epilogue
using Epilogue = typename cutlass::conv::kernel::detail::DefaultConvEpilogueWithBroadcastSimtStridedDgrad<
ArchTag,
typename ImplicitGemmBase::Epilogue::Shape,
typename ImplicitGemmBase::Epilogue::WarpMmaOperator,
ElementC,
typename EpilogueOutputOp::ElementT,
typename EpilogueOutputOp::ElementVector,
EpilogueOutputOp,
ImplicitGemmBase::Epilogue::kElementsPerAccess
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionWithFusedEpilogue<
typename ImplicitGemmBase::Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/kernel/default_deconv2d_with_broadcast.h/0 | {
"file_path": "include/cutlass/conv/kernel/default_deconv2d_with_broadcast.h",
"repo_id": "include",
"token_count": 2872
} | 18 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNHWC or TensorNCxHWx<Interleave> layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>,
conv::GroupMode GroupMode_ = conv::GroupMode::kNone
>
class Conv2dFpropActivationTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static conv::GroupMode const kGroupMode = GroupMode_;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dAnalyticParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
int filter_c_;
int filter_r_;
int filter_s_;
int filter_c_init_;
int group_idx_offset_;
int channels_per_group_;
int crs_cnt_;
int crs_per_group_;
int offset_n_[ThreadMap::Iterations::kStrided];
int offset_p_[ThreadMap::Iterations::kStrided];
int offset_q_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv2dFpropActivationTileAccessIteratorAnalytic(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
crs_cnt_(0),
group_idx_offset_(0),
filter_c_(0),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.column() + thread_coord.contiguous();
if (kGroupMode != conv::GroupMode::kNone) {
filter_c_init_ = filter_c_;
channels_per_group_ = problem_size_.C / problem_size_.groups;
crs_per_group_ = problem_size_.S * problem_size_.R * ((channels_per_group_ + Shape::kColumn - 1) / Shape::kColumn);
}
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
int offset_npq = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
offset_n_[s] = offset_npq / (problem_size_.P * problem_size_.Q);
int residual = offset_npq % (problem_size_.P * problem_size_.Q);
offset_p_[s] = residual / problem_size_.Q;
offset_q_[s] = residual % problem_size_.Q;
}
set_iteration_index(0);
}
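  // Worked example of the grouped-conv bookkeeping above (illustrative numbers, not from a real
  // problem): with R = S = 3, C = 128, groups = 4 and Shape::kColumn = 32, channels_per_group_
  // is 128 / 4 = 32 and crs_per_group_ = 3 * 3 * ((32 + 32 - 1) / 32) = 9, i.e. nine k-tiles are
  // consumed before advance() wraps filter_c_ to the next group's channel range.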
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
if (kGroupMode != conv::GroupMode::kNone) {
++crs_cnt_;
}
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
if (kGroupMode == conv::GroupMode::kNone) {
filter_c_ += Shape::kColumn * problem_size_.split_k_slices;
} else {
if (crs_cnt_ == crs_per_group_) {
// moves to next group
crs_cnt_ = 0;
++group_idx_offset_;
filter_c_ = group_idx_offset_ * channels_per_group_ + filter_c_init_;
} else {
filter_c_ += Shape::kColumn * problem_size_.split_k_slices;
}
}
}
/// Returns the coordinate in the activations tensor X that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int n = offset_n_[iteration_strided_];
int p = offset_p_[iteration_strided_];
int q = offset_q_[iteration_strided_];
int r = filter_r_;
int s = filter_s_;
if (problem_size_.mode == Mode::kConvolution) {
r = (problem_size_.R - 1 - filter_r_);
s = (problem_size_.S - 1 - filter_s_);
}
int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h;
int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w;
int c = filter_c_ + iteration_vector_ * AccessType::kElements;
return TensorCoord(n, h, w, c);
}
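  // Worked example of the mapping above (illustrative numbers): with stride_h = 2, pad_h = 1 and
  // dilation_h = 1, output row p = 3 and filter row r = 2 map to input row
  // h = p * stride_h - pad_h + r * dilation_h = 3 * 2 - 1 + 2 * 1 = 7. Out-of-range h/w values
  // produced here are rejected by valid() below rather than clamped.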
/// Returns true if the current coordinate is within the activations tensor X
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N &&
coord.h() >= 0 && coord.h() < problem_size_.H &&
coord.w() >= 0 && coord.w() < problem_size_.W &&
coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
AccessType const *ptr = reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
return ptr;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dFpropActivationTileAccessIteratorAnalytic &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if ((problem_size.C / problem_size.groups) % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
if (platform::is_same<Layout, layout::TensorNCxHWx<32>>::value) {
if (problem_size.C % 32) {
return Status::kErrorInvalidProblem;
}
}
if (platform::is_same<Layout, layout::TensorNCxHWx<64>>::value) {
if (problem_size.C % 64) {
return Status::kErrorInvalidProblem;
}
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h",
"repo_id": "include",
"token_count": 3772
} | 19 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kStrided
>
class Conv3dDgradOutputGradientTileAccessIteratorAnalytic;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv3dDgradOutputGradientTileAccessIteratorAnalytic strided dgrad needs special handling using
// unscaled coordinates
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dDgradOutputGradientTileAccessIteratorAnalytic <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kStrided
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
static_assert(sizeof_bits<Element>::value >= 8,
"DGRAD requires elements of size 8b or greater.");
//
  // Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
struct Params {
Layout layout;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
ConvProblemSize const &problem_size,
Layout const &layout
): layout(layout) {
}
};
private:
Params const ¶ms_;
ConvProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
int filter_k_;
int filter_t_;
int filter_r_;
int filter_s_;
int offset_n_[ThreadMap::Iterations::kStrided];
int offset_d_[ThreadMap::Iterations::kStrided];
int offset_w_[ThreadMap::Iterations::kStrided];
int offset_h_[ThreadMap::Iterations::kStrided];
private:
/// Returns the coordinate in the output tensor Dy that is currently pointed to
/// by the iterator but DOES NOT scale by the convolution stride. This is needed
/// to compute predicates in the valid() method. The return value of the public at()
/// method is correctly scaled.
CUTLASS_HOST_DEVICE
TensorCoord unscaled_at_() const {
int n = offset_n_[iteration_strided_];
int d = offset_d_[iteration_strided_];
int h = offset_h_[iteration_strided_];
int w = offset_w_[iteration_strided_];
int t = filter_t_;
int r = filter_r_;
int s = filter_s_;
if (problem_size_.mode == Mode::kConvolution) {
t = (problem_size_.T - 1 - t);
r = (problem_size_.R - 1 - r);
s = (problem_size_.S - 1 - s);
}
int z = (d + problem_size_.pad_d - t * problem_size_.dilation_d);
int p = (h + problem_size_.pad_h - r * problem_size_.dilation_h);
int q = (w + problem_size_.pad_w - s * problem_size_.dilation_w);
return TensorCoord(n, z, p, q, filter_k_);
}
public:
CUTLASS_HOST_DEVICE
Conv3dDgradOutputGradientTileAccessIteratorAnalytic(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // threadblock offset - units are whole CTA tiles
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_k_(0),
filter_t_(0),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.column() + thread_coord.contiguous();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
int offset_ndhw = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
offset_n_[s] = offset_ndhw / (problem_size_.D * problem_size_.H * problem_size_.W);
int residual = offset_ndhw % (problem_size_.D * problem_size_.H * problem_size_.W);
offset_d_[s] = residual / (problem_size_.H * problem_size_.W);
residual = residual % (problem_size_.H * problem_size_.W);
offset_h_[s] = residual / problem_size_.W;
offset_w_[s] = residual % problem_size_.W;
}
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// move to the next tile
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
++filter_t_;
if (filter_t_ < problem_size_.T) {
return;
}
filter_t_ = 0;
filter_k_ += Shape_::kColumn * problem_size_.split_k_slices;
}
/// Returns the coordinate in the output tensor Dy that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
TensorCoord coord = unscaled_at_();
return TensorCoord(
coord.n(),
coord.d() / problem_size_.stride_d,
coord.h() / problem_size_.stride_h,
coord.w() / problem_size_.stride_w,
coord.c());
}
/// Returns true if the current coordinate is within the output tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord unscaled_coord = unscaled_at_();
TensorCoord coord = at();
return
!(unscaled_coord.d() % problem_size_.stride_d) &&
!(unscaled_coord.h() % problem_size_.stride_h) &&
!(unscaled_coord.w() % problem_size_.stride_w) &&
coord.n() < problem_size_.N &&
coord.d() >= 0 && coord.d() < problem_size_.Z &&
coord.h() >= 0 && coord.h() < problem_size_.P &&
coord.w() >= 0 && coord.w() < problem_size_.Q &&
coord.c() < problem_size_.K;
}
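  // Worked example of the divisibility predicates above (illustrative numbers): with stride_h = 2,
  // pad_h = 1 and dilation_h = 1, input row h = 4 and filter row r = 1 give an unscaled value of
  // h + pad_h - r * dilation_h = 4, which is divisible by stride_h, so at() yields p = 4 / 2 = 2.
  // For h = 5 the unscaled value is 5, which is not divisible by 2, so valid() returns false.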
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dDgradOutputGradientTileAccessIteratorAnalytic &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv3d_dgrad_output_gradient_tile_access_iterator_analytic.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv3d_dgrad_output_gradient_tile_access_iterator_analytic.h",
"repo_id": "include",
"token_count": 3697
} | 20 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Transformation applied to A operand
typename TransformA_ = NumericArrayConverter<
typename SmemIteratorA_::Element,
typename IteratorA_::Element,
IteratorA_::Fragment::kElements>,
    /// Transformation applied to B operand
typename TransformB_ = NumericArrayConverter<
typename SmemIteratorB_::Element,
typename IteratorB_::Element,
IteratorB_::Fragment::kElements>,
/// Used for partial specialization
typename Enable = bool
>
class DepthwiseFpropPipelined : public gemm::threadblock::MmaBase<Shape_, Policy_, 2> {
public:
///< Base class
using Base = gemm::threadblock::MmaBase<Shape_, Policy_, 2>;
using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
using IteratorA = IteratorA_; ///< Iterates over tiles of A operand in global memory
using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory
using ElementC = ElementC_; ///< Data type of accumulator matrix
using LayoutC = LayoutC_; ///< Layout of accumulator matrix
using Policy = Policy_; ///< Policy describing tuning details
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
//
// Dependent types
//
/// Fragment of operand A loaded from global memory
using FragmentA = typename IteratorA::Fragment;
/// Fragment of operand B loaded from global memory
using FragmentB = typename IteratorB::Fragment;
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Obtain the arch tag from the warp-level operator
using ArchTag = typename Policy::Operator::ArchTag;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
  // statically assert kStages for MmaPipelined is two (Double-buffered pipeline)
static_assert((Base::kStages==2), "MmaPipelined requires kStages set to value 2");
private:
using WarpFragmentA = typename Operator::FragmentA;
using WarpFragmentB = typename Operator::FragmentB;
protected:
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
DepthwiseFpropPipelined(
typename Base::SharedStorage &shared_storage, ///< Shared storage needed for internal use by threadblock-scoped GEMM
int thread_idx, ///< ID within the threadblock
int warp_idx, ///< ID of warp
int lane_idx ///< ID of each thread within a warp
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
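  // Worked example of the warp mapping above (illustrative numbers): with WarpCount::kM = 2 and
  // WarpCount::kN = 2, warp_idx = 5 gives warp_idx_mn = 5 % 4 = 1 and warp_idx_k = 5 / 4 = 1,
  // which further decompose into warp_idx_m = 1 % 2 = 1 and warp_idx_n = 1 / 2 = 0.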
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
int gemm_k_iterations, ///< number of iterations of the mainloop
FragmentC &accum, ///< destination accumulator tile
IteratorA iterator_A, ///< iterator over A operand in global memory
IteratorB iterator_B, ///< iterator over B operand in global memory
FragmentC const &src_accum, ///< source accumulator tile
int gemm_k_iterations_per_channel = 0, ///< number of iterations per channel
TransformA transform_A = TransformA(), ///< transformation applied to A fragment
TransformB transform_B = TransformB()) { ///< transformation applied to B fragment
//
// Prologue
//
// Perform accumulation in the 'd' output operand
accum = src_accum;
FragmentA tb_frag_A;
FragmentB tb_frag_B;
tb_frag_A.clear();
tb_frag_B.clear();
// The last kblock is loaded in the prolog
iterator_A.load(tb_frag_A);
iterator_B.load(tb_frag_B);
++iterator_A;
++iterator_B;
this->smem_iterator_A_.store(transform_A(tb_frag_A));
this->smem_iterator_B_.store(transform_B(tb_frag_B));
++this->smem_iterator_A_;
++this->smem_iterator_B_;
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math instructions
WarpFragmentA warp_frag_A[2];
WarpFragmentB warp_frag_B[2];
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_frag_A[0]);
this->warp_tile_iterator_B_.load(warp_frag_B[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
Operator warp_mma;
int smem_write_stage_idx = 1;
// Depthwise specific
int channel_start_index = 0;
int rs_plane_idx = 0;
// Issue loads during the first warp-level matrix multiply-add *AFTER* issuing
// shared memory loads (which have the tightest latency requirement).
//
// Mainloop
//
// Note: The main loop does not support Base::kWarpGemmIterations == 2.
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > 0; --gemm_k_iterations) {
//
// Loop over GEMM K dimension
//
if(rs_plane_idx == gemm_k_iterations_per_channel - 1){
        // Reset iteration index.
iterator_B.set_iteration_index(0);
}
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if this is the last group
// as the case may be.
if (warp_mma_k == Base::kWarpGemmIterations - 1) {
// Write fragments to shared memory
this->smem_iterator_A_.store(transform_A(tb_frag_A));
this->smem_iterator_B_.store(transform_B(tb_frag_B));
__syncthreads();
if(rs_plane_idx == gemm_k_iterations_per_channel - 1){
// Move to next set of filter groups.
channel_start_index += Base::kWarpGemmIterations;
}
++this->smem_iterator_A_;
++this->smem_iterator_B_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
}
else {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations,
0});
}
smem_write_stage_idx ^= 1;
}
this->warp_tile_iterator_A_.set_kgroup_index(channel_start_index + (warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index(channel_start_index + (warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
if (warp_mma_k == 0) {
iterator_A.load(tb_frag_A);
iterator_B.load(tb_frag_B);
++iterator_A;
++iterator_B;
}
warp_mma(accum, warp_frag_A[warp_mma_k % 2],
warp_frag_B[warp_mma_k % 2], accum);
}
rs_plane_idx = (rs_plane_idx == gemm_k_iterations_per_channel - 1) ? 0: (rs_plane_idx + 1);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/depthwise_fprop_pipelined.h/0 | {
"file_path": "include/cutlass/conv/threadblock/depthwise_fprop_pipelined.h",
"repo_id": "include",
"token_count": 4719
} | 21 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Basic include for CUTLASS.
*/
#pragma once
#include "cutlass/detail/helper_macros.hpp"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/// Status code returned by CUTLASS operations
enum class Status {
kSuccess, ///< Operation was successful.
kErrorMisalignedOperand, ///< operands fail alignment requirements.
kErrorInvalidDataType, ///< DataType fails requirement.
kErrorInvalidLayout, ///< Layout fails alignment requirement.
kErrorInvalidProblem, ///< Specified problem size is not supported by operator.
kErrorNotSupported, ///< Operation is not supported on current device.
kErrorWorkspaceNull, ///< The given workspace is null when it is required to be non-null.
kErrorInternal, ///< An error within CUTLASS occurred.
kErrorArchMismatch, ///< CUTLASS runs on a device that it was not compiled for.
kErrorInsufficientDriver, ///< CUTLASS runs with a driver that is too old.
kErrorMemoryAllocation, ///< Kernel launch failed due to insufficient device memory.
kInvalid ///< Status is unspecified.
};
/// Convert cutlass status to status strings
CUTLASS_HOST_DEVICE
static char const* cutlassGetStatusString(cutlass::Status status) {
switch (status) {
case cutlass::Status::kSuccess:
return "Success";
case cutlass::Status::kErrorMisalignedOperand:
return "Error Misaligned Operand";
case cutlass::Status::kErrorInvalidDataType:
return "Error Invalid Data Type";
case cutlass::Status::kErrorInvalidLayout:
return "Error Invalid Layout";
case cutlass::Status::kErrorInvalidProblem:
return "Error Invalid Problem";
case cutlass::Status::kErrorNotSupported:
return "Error Not Supported";
case cutlass::Status::kErrorWorkspaceNull:
return "Error Workspace Null";
case cutlass::Status::kErrorInternal:
return "Error Internal";
case cutlass::Status::kErrorInsufficientDriver:
return "Error Insufficient Driver";
case cutlass::Status::kErrorArchMismatch:
return "Error Architecture Mismatch";
case cutlass::Status::kErrorMemoryAllocation:
return "Error Memory Allocation failed";
case cutlass::Status::kInvalid: break;
}
return "Invalid status";
}
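// Illustrative usage (sketch, not part of this header): device-level CUTLASS operators return a
// Status from can_implement(), initialize() and run(); `gemm_op` below is an assumed operator
// instance used only to show how the string conversion is typically applied on the host.
//
//   cutlass::Status status = gemm_op(args);
//   if (status != cutlass::Status::kSuccess) {
//     std::cerr << "CUTLASS error: " << cutlass::cutlassGetStatusString(status) << std::endl;
//   }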
////////////////////////////////////////////////////////////////////////////////////////////////////
static const int NumThreadsPerWarp = 32;
static const int NumThreadsPerWarpGroup = 128;
static const int NumWarpsPerWarpGroup = NumThreadsPerWarpGroup / NumThreadsPerWarp;
static const int NumThreadsPerHalfWarp = NumThreadsPerWarp / 2;
static const int NumThreadsPerQuad = 4;
static const int NumThreadsPerQuadPair = NumThreadsPerQuad * 2;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper function to return true when called by thread 0 of threadblock 0.
CUTLASS_HOST_DEVICE bool thread0() {
#if defined(__CUDA_ARCH__)
return (!threadIdx.x && !threadIdx.y && !threadIdx.z) && (!blockIdx.x && !blockIdx.y && !blockIdx.z);
#else
return false;
#endif
}
/// Returns a lane index within the warp. The threads in the warp may not be converged
CUTLASS_DEVICE
int canonical_lane_idx() {
#if defined(__CUDA_ARCH__)
return threadIdx.x % NumThreadsPerWarp;
#else
return 0;
#endif
}
/// Returns a warp-uniform value indicating the canonical warp index of the calling threads.
/// Threads within the warp must be converged.
CUTLASS_DEVICE
int canonical_warp_idx_sync() {
#if defined(__CUDA_ARCH__)
return __shfl_sync(0xffffffff, threadIdx.x / NumThreadsPerWarp, 0);
#else
return 0;
#endif
}
/// Returns a warp index within the CTA. The threads in the warp may not be converged
/// Because it does not synchronize the warp, it is faster and allows forward progress
CUTLASS_DEVICE
int canonical_warp_idx() {
#if defined(__CUDA_ARCH__)
return threadIdx.x / NumThreadsPerWarp;
#else
return 0;
#endif
}
/// Returns a warp-uniform value indicating the canonical warp group index of the calling threads.
/// Threads within the warp must be converged.
CUTLASS_DEVICE
int canonical_warp_group_idx() {
#if defined(__CUDA_ARCH__)
return __shfl_sync(0xffffffff, threadIdx.x / NumThreadsPerWarpGroup, 0);
#else
return 0;
#endif
}
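// Illustrative usage (sketch): these helpers are commonly used for debug printing and for electing
// a warp or warp group to a role inside a kernel. The role assignment below is an assumption for
// illustration only.
//
//   if (cutlass::thread0()) {
//     printf("kernel launched\n"); // executes once, on thread 0 of threadblock 0
//   }
//   int warp_idx = cutlass::canonical_warp_idx_sync(); // threads in the warp must be converged
//   if (warp_idx == 0) {
//     // e.g. act as the producer / load warp
//   }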
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/cutlass.h/0 | {
"file_path": "include/cutlass/cutlass.h",
"repo_id": "include",
"token_count": 1922
} | 22 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing elementwise operations used by epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/arch/barrier.h"
#include "cutlass/epilogue/dispatch_policy.hpp"
#include "cutlass/epilogue/collective/detail.hpp"
#include "cutlass/epilogue/thread/scale_type.h"
#include "cutlass/epilogue/fusion/callbacks.hpp"
#include "cutlass/epilogue/fusion/sm90_callbacks_tma_warpspecialized.hpp"
#include "cutlass/detail/collective.hpp"
#include "cutlass/detail/layout.hpp"
#include "cutlass/trace.h"
#include "cute/tensor.hpp"
#include "cutlass/cuda_host_adapter.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace collective {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
int StagesC_,
int StagesD_,
int FragmentSize_,
bool ReuseSmemC_,
bool DelayTmaStore_,
class CtaTileMNK_, // (CTA_M,CTA_N,CTA_K)
class EpilogueTile_, // (EPI_TILE_M,EPI_TILE_N)
class ElementC_,
class StrideC_,
class ElementD_,
class StrideD_,
class FusionCallbacks_,
class CopyOpG2S_,
class SmemLayoutAtomC_,
class CopyOpS2R_,
class CopyOpS2G_,
class SmemLayoutAtomD_,
class CopyOpR2S_,
class CopyAtomC_
>
class CollectiveEpilogue<
Sm90PtrArrayTmaWarpSpecialized<StagesC_,StagesD_,FragmentSize_,ReuseSmemC_,DelayTmaStore_>,
CtaTileMNK_,
EpilogueTile_,
ElementC_,
StrideC_,
ElementD_,
StrideD_,
FusionCallbacks_,
CopyOpG2S_,
SmemLayoutAtomC_,
CopyOpS2R_,
CopyOpS2G_,
SmemLayoutAtomD_,
CopyOpR2S_,
CopyAtomC_
> {
public:
//
// Type Aliases
//
using DispatchPolicy = Sm90PtrArrayTmaWarpSpecialized<StagesC_,StagesD_,FragmentSize_,ReuseSmemC_,DelayTmaStore_>;
using CtaTileMNK = CtaTileMNK_;
using EpilogueTile = EpilogueTile_;
using FusionCallbacks = FusionCallbacks_;
using ElementC = ElementC_;
using StrideC = StrideC_;
using InternalStrideC = cute::remove_pointer_t<StrideC>;
using ElementD = ElementD_;
using StrideD = StrideD_;
using InternalStrideD = cute::remove_pointer_t<StrideD>;
using CopyOpG2S = CopyOpG2S_;
using SmemLayoutAtomC = SmemLayoutAtomC_;
using CopyOpS2R = CopyOpS2R_;
using CopyOpS2G = CopyOpS2G_;
using SmemLayoutAtomD = SmemLayoutAtomD_;
using CopyOpR2S = CopyOpR2S_;
using CopyAtomC = CopyAtomC_;
using ThreadEpilogueOp = typename epilogue::fusion::FusionCallbacksTraits<FusionCallbacks>::Operation;
using GmemTiledCopyC = CopyOpG2S;
using GmemTiledCopyD = CopyOpS2G;
static_assert(!is_layout<EpilogueTile>::value && is_tuple<EpilogueTile>::value, "EpilogueTile must be a cute::Tile or cute::Shape");
static_assert(cute::rank(CtaTileMNK{}) == 3, "CtaTileMNK must be rank-3: [CTA_M, CTA_N, CTA_K]");
static_assert(cute::rank(EpilogueTile{}) == 2, "EpilogueTile must be rank-2: [EPI_TILE_M, EPI_TILE_N]");
static_assert(size<0>(CtaTileMNK{}) % size<0>(shape(EpilogueTile{})) == 0, "EPI_TILE_M must divide CTA_M");
static_assert(size<1>(CtaTileMNK{}) % size<1>(shape(EpilogueTile{})) == 0, "EPI_TILE_N must divide CTA_N");
static_assert(cute::rank(InternalStrideC{}) == 3, "StrideC must be rank-3: [M, N, L]");
static_assert(cute::rank(InternalStrideD{}) == 3, "StrideD must be rank-3: [M, N, L]");
private:
constexpr static bool is_source_supported = not cute::is_void_v<ElementC>;
constexpr static bool is_destination_supported = not cute::is_void_v<ElementD>;
using NonVoidElementD = cute::conditional_t<not is_destination_supported,fusion::get_element_aux_t<FusionCallbacks>, ElementD>;
static_assert(not cute::is_void_v<NonVoidElementD>, "SmemElementD is void");
using NonVoidElementC = cute::conditional_t<not is_source_supported,NonVoidElementD,ElementC>; // prevents void ref breakages
using SmemElementC = typename cutlass::detail::get_unpacked_element_type<NonVoidElementC>::type;
using SmemElementD = typename cutlass::detail::get_unpacked_element_type<NonVoidElementD>::type;
constexpr static int StagesC = StagesC_;
constexpr static int StagesD = StagesD_;
constexpr static bool ReuseSmemC = ReuseSmemC_ and is_destination_supported;
constexpr static bool DelayTmaStore = DelayTmaStore_;
constexpr static bool is_m_major_C = detail::is_m_major<InternalStrideC>();
constexpr static bool is_m_major_D = detail::is_m_major<InternalStrideD>();
constexpr static bool is_im2col_C = cute::is_same_v<CopyOpG2S, SM90_TMA_LOAD_IM2COL>;
constexpr static bool is_im2col_D = cute::is_same_v<CopyOpS2G, SM90_TMA_STORE_IM2COL>;
using SmemLayoutC = decltype(tile_to_shape(
SmemLayoutAtomC{},
make_shape(size<0>(EpilogueTile{}), size<1>(EpilogueTile{}), Int<StagesC>{}),
cute::conditional_t<is_m_major_C, Step<_2,_1,_3>, Step<_1,_2,_3>>{} ));
using SmemLayoutD = decltype(tile_to_shape(
SmemLayoutAtomD{},
make_shape(size<0>(EpilogueTile{}), size<1>(EpilogueTile{}), Int<ReuseSmemC ? StagesC : StagesD>{}),
cute::conditional_t<is_m_major_D, Step<_2,_1,_3>, Step<_1,_2,_3>>{} ));
constexpr static bool support_smem_reuse = is_source_supported && is_destination_supported && StagesD <= StagesC
&& cosize(take<0,2>(SmemLayoutC{})) == cosize(take<0,2>(SmemLayoutD{}));
static_assert(not (ReuseSmemC && not support_smem_reuse), "Smem reuse requirements not met");
constexpr static size_t SmemAlignmentD = cutlass::detail::alignment_for_swizzle(SmemLayoutD{});
constexpr static size_t SmemAlignmentC = cutlass::detail::alignment_for_swizzle(SmemLayoutC{});
constexpr static size_t MaxSmemAlignment = cute::max(SmemAlignmentC, SmemAlignmentD);
using SmemArrayTypeC = cute::ArrayEngine<SmemElementC, cosize_v<SmemLayoutC>>;
using SmemArrayTypeD = cute::ArrayEngine<SmemElementD, cosize_v<SmemLayoutD>>;
using EmptyType = cute::tuple<>;
using SmemCStorage = cute::conditional_t<is_source_supported and (not ReuseSmemC),
SmemArrayTypeC,
EmptyType>;
using SmemDStorage = cute::conditional_t<is_destination_supported,
SmemArrayTypeD,
EmptyType>;
struct CollectiveStorageWithC {
alignas(SmemAlignmentC) ArrayEngine<SmemElementC, cosize_v<SmemLayoutC>> smem_C;
alignas(SmemAlignmentD) ArrayEngine<SmemElementD, cosize_v<SmemLayoutD>> smem_D;
};
union CollectiveStorageWithoutC {
cute::array<SmemElementC, 0> smem_C;
alignas(SmemAlignmentD) ArrayEngine<SmemElementD, cosize_v<SmemLayoutD>> smem_D;
};
union CollectiveStorageReuseC {
alignas(MaxSmemAlignment) ArrayEngine<SmemElementC, cosize_v<SmemLayoutC>> smem_C;
alignas(MaxSmemAlignment) ArrayEngine<SmemElementD, cosize_v<SmemLayoutD>> smem_D;
};
public:
// TMA pipeline for loading C
using LoadPipeline = cutlass::PipelineTransactionAsync<StagesC>;
using LoadPipelineState = cutlass::PipelineState<StagesC>;
constexpr static uint32_t TmaTransactionBytes =
(size(take<0,2>(SmemLayoutC{})) * static_cast<uint32_t>(sizeof_bits<SmemElementC>::value)) / 8;
constexpr static bool RequiresTransactionBytes = true;
// TMA pipeline for storing D
using StorePipeline = cute::conditional_t<ReuseSmemC,
cutlass::PipelineTmaStore<StagesC, StagesD-1>,
cutlass::PipelineTmaStore<StagesD>>;
using StorePipelineState = cutlass::PipelineState<ReuseSmemC ? StagesC : StagesD>;
struct SharedStorage {
struct TensorStorage {
using CollectiveStorage = cute::conditional_t<not is_source_supported, CollectiveStorageWithoutC,
cute::conditional_t<ReuseSmemC, CollectiveStorageReuseC, CollectiveStorageWithC>>;
CollectiveStorage collective;
using FusionStorage = typename FusionCallbacks::SharedStorage;
FusionStorage thread;
} tensors;
struct TensorMapStorage : cute::aligned_struct<128> {
cute::TmaDescriptor smem_tensormap_C;
cute::TmaDescriptor smem_tensormap_D;
} tensormaps;
using PipelineStorage = typename LoadPipeline::SharedStorage;
PipelineStorage pipeline;
};
using TensorStorage = typename SharedStorage::TensorStorage;
using TensorMapStorage = typename SharedStorage::TensorMapStorage;
using PipelineStorage = typename SharedStorage::PipelineStorage;
// Host side epilogue arguments
struct Arguments {
typename FusionCallbacks::Arguments thread{};
ElementC const** ptr_C = nullptr;
StrideC dC;
ElementD ** ptr_D = nullptr;
StrideD dD;
};
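  // Illustrative sketch (assumed names, not part of this header): for this pointer-array epilogue,
  // ptr_C / ptr_D are device-resident arrays holding one pointer per group/batch, while a single
  // stride is shared across all of them. The exact form of the fusion arguments depends on the
  // FusionCallbacks in use; {alpha, beta} is shown only as a common case.
  //
  //   Arguments epilogue_args{
  //     {alpha, beta},        // FusionCallbacks::Arguments
  //     device_ptr_C_array,   // ElementC const** in device memory
  //     stride_C,
  //     device_ptr_D_array,   // ElementD** in device memory
  //     stride_D
  //   };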
// Device side epilogue params
struct Params {
using TMA_C = decltype(make_tma_copy(
CopyOpG2S{},
make_tensor(make_gmem_ptr(static_cast<NonVoidElementC const*>(nullptr)),
repeat_like(InternalStrideC{}, int32_t(0)), InternalStrideC{}),
take<0,2>(SmemLayoutC{}),
EpilogueTile{},
_1{}));
using TMA_D = decltype(make_tma_copy(
CopyOpS2G{},
make_tensor(make_gmem_ptr(static_cast<NonVoidElementD const*>(nullptr)),
repeat_like(InternalStrideD{}, int32_t(0)), InternalStrideD{}),
take<0,2>(SmemLayoutD{}),
EpilogueTile{},
_1{}));
typename FusionCallbacks::Params thread{};
TMA_C tma_load_c;
TMA_D tma_store_d;
cute::TmaDescriptor* tensormaps;
ElementC const** ptr_C;
ElementD** ptr_D;
uint32_t tma_transaction_bytes = TmaTransactionBytes;
};
//
// Methods
//
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(
ProblemShape const& problem_shape,
Arguments const& args,
[[maybe_unused]] void* workspace) {
    // Optionally append 1s until the problem shape is rank-4, in case it is only rank-3 (MNK)
auto problem_shape_MNKL = append<4>(problem_shape.get_host_problem_shape(), 1);
auto [M, N, K, mock_L] = problem_shape_MNKL;
    // Manage batches/groups through pointers to input matrices
mock_L = 1;
static_assert(!is_im2col_C and !is_im2col_D, "Im2Col not supported on C or D");
uint32_t transaction_bytes = TmaTransactionBytes;
typename Params::TMA_C tma_load_c = {};
if constexpr (is_source_supported) {
ElementC const* ptr_C_first_batch = reinterpret_cast<ElementC const*>(args.ptr_C);
Tensor tensor_c = make_tensor(ptr_C_first_batch, make_layout(make_shape(M,N,mock_L), append<3>(args.dC, _0{})));
tma_load_c = make_tma_copy_C_sm90(
CopyOpG2S{},
tensor_c,
take<0,2>(SmemLayoutC{}),
EpilogueTile{});
}
typename Params::TMA_D tma_store_d;
if constexpr (is_destination_supported) {
ElementD const* ptr_D_first_batch = reinterpret_cast<ElementD const*>(args.ptr_D);
Tensor tensor_d = make_tensor(ptr_D_first_batch, make_layout(make_shape(M,N,mock_L), append<3>(args.dD, _0{})));
tma_store_d = make_tma_copy_C_sm90(
CopyOpS2G{},
tensor_d,
take<0,2>(SmemLayoutD{}),
EpilogueTile{});
}
auto fusion_workspace = static_cast<char*>(workspace);
auto fusion_workspace_size = FusionCallbacks::get_workspace_size(problem_shape, args.thread);
auto tma_descriptor_workspace = reinterpret_cast<cute::TmaDescriptor*>(
static_cast<char*>(workspace) + fusion_workspace_size);
return {
FusionCallbacks::to_underlying_arguments(problem_shape, args.thread, fusion_workspace),
tma_load_c,
tma_store_d,
tma_descriptor_workspace,
args.ptr_C,
args.ptr_D,
transaction_bytes,
};
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args, int sm_count) {
constexpr uint32_t NumInputTensors = cute::is_void_v<ElementC> ? 1 : 2;
constexpr size_t SizeOfCuTensorMap = sizeof(cute::TmaDescriptor);
    // Allocate gmem space for the input tensormaps of each SM: C tensormap copies followed by D tensormap copies
return (NumInputTensors * SizeOfCuTensorMap * sm_count) + FusionCallbacks::get_workspace_size(problem_shape, args.thread);
}
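  // Worked example (illustrative): with a non-void ElementC, NumInputTensors == 2 and each
  // cute::TmaDescriptor occupies 128 bytes, so a device exposing 132 SMs reserves
  // 2 * 128 * 132 = 33792 bytes of tensormap workspace plus whatever FusionCallbacks requests.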
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
return FusionCallbacks::initialize_workspace(problem_shape, args.thread, workspace, stream, cuda_adapter);
}
template <class ProblemShape>
static bool
can_implement(
ProblemShape const& problem_shape,
[[maybe_unused]] Arguments const& args) {
auto problem_shape_MNKL = append<4>(problem_shape.get_host_problem_shape(), 1);
auto [M,N,K,L] = problem_shape_MNKL;
bool implementable = true;
if constexpr (is_destination_supported) {
constexpr int tma_alignment_bits_D = cutlass::detail::get_output_alignment_bits<ElementD>();
constexpr int min_tma_aligned_elements_D = tma_alignment_bits_D / cutlass::sizeof_bits<ElementD>::value;
implementable = cutlass::detail::check_alignment<min_tma_aligned_elements_D>(cute::make_shape(M,N,L), InternalStrideD{});
}
if constexpr (not cute::is_void_v<ElementC>) {
constexpr int tma_alignment_bits_C = cutlass::detail::get_input_alignment_bits<ElementC>();
constexpr int min_tma_aligned_elements_C = tma_alignment_bits_C / cutlass::sizeof_bits<ElementC>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_C>(cute::make_shape(M,N,L), InternalStrideC{});
}
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Problem Size doesn't meet the minimum alignment requirements for TMA.\n");
}
bool fusion_implementable = FusionCallbacks::can_implement(problem_shape, args.thread);
if (!fusion_implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Problem Size doesn't meet the minimum requirements for FusionCallbacks.\n");
}
bool beta_implementable = true;
if constexpr (cute::is_void_v<ElementC>) {
if constexpr (detail::has_beta<Arguments>::value) {
beta_implementable = args.thread.beta == 0.0;
}
if constexpr (detail::has_beta_ptr<Arguments>::value) {
beta_implementable = beta_implementable && args.thread.beta_ptr == nullptr;
}
}
if (!beta_implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Beta/beta pointer was set, but epilogue is sourceless (void-C).\n");
}
return implementable && fusion_implementable && beta_implementable;
}
template<class TileShapeMNK>
CUTLASS_HOST_DEVICE
static constexpr int
get_load_pipe_increment(TileShapeMNK tile_shape_MNK) {
// Compute number of epilogue subtiles
return size<1>(zipped_divide(make_layout(take<0,2>(tile_shape_MNK)), EpilogueTile{}));
}
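  // Worked example (illustrative): for a 128x128 CTA tile and a 64x32 EpilogueTile, the
  // zipped_divide above yields (128/64) * (128/32) = 8 epilogue subtiles, so the pipeline
  // state advances by 8 per output tile.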
template<class TileShapeMNK>
CUTLASS_HOST_DEVICE
static constexpr int
get_store_pipe_increment(TileShapeMNK tile_shape_MNK) {
return get_load_pipe_increment(tile_shape_MNK);
}
CUTLASS_HOST_DEVICE
CollectiveEpilogue(Params const& params_, TensorStorage& shared_tensors)
: params(params_), fusion_callbacks(params_.thread, shared_tensors.thread) {}
CUTLASS_DEVICE
bool
is_producer_load_needed() const {
return fusion_callbacks.is_producer_load_needed();
}
CUTLASS_DEVICE auto
load_init(Params const& params, int32_t const sm_count, int32_t const sm_idx) const {
// Initialize tma for loading
constexpr bool IsLoad = true;
auto load_tensormaps = tensormaps_init<IsLoad>(params, sm_count, sm_idx);
return load_tensormaps;
}
template<
class ProblemShapeMNKL,
class TileShapeMNK,
class TileCoordMNKL,
class TiledMma,
class TensorMapC
>
CUTLASS_DEVICE auto
load(
LoadPipeline load_pipeline,
LoadPipelineState load_pipe_producer_state,
ProblemShapeMNKL problem_shape_mnkl,
TileShapeMNK tile_shape_MNK,
TileCoordMNKL tile_coord_mnkl,
TiledMma tiled_mma,
int thread_idx,
TensorStorage& shared_tensors,
TensorMapC const& load_tensormap,
int subtile_idx=-1,
bool return_prior_state = false) {
using namespace cute;
// Indexing variables
auto [M, N, K, L] = problem_shape_mnkl;
auto [m_coord, n_coord, k_coord, l_coord] = tile_coord_mnkl;
static_assert(!is_im2col_D, "Do not support im2col");
auto coord_shape = append<3>(make_shape(m_coord, n_coord), Int<0>{});
// Represent the full source tensor, slice to get the tile this CTA is currently responsible for
Tensor mC_mn = params.tma_load_c.get_tma_tensor(append<3>(make_shape(M,N), Int<1>{})); // (M,N,L)
Tensor mC = coalesce(mC_mn, take<0,2>(CtaTileMNK{}));
Tensor gC = local_tile(mC, take<0,2>(CtaTileMNK{}), coord_shape); // (CTA_M,CTA_N)
// Apply epilogue subtile, get matching smem tensor
auto ptr_sC = shared_tensors.collective.smem_C.begin();
Tensor gC_epi = flat_divide(gC, EpilogueTile{}); // (EPI_TILE_M,EPI_TILE_N,EPI_M,EPI_N)
Tensor sC_epi = make_tensor(make_smem_ptr(ptr_sC), SmemLayoutC{}); // (EPI_TILE_M,EPI_TILE_N,PIPE_C)
// Prepare the thread(b)lock's (G)mem to (S)mem TMA tiled copy (bGS_)
ThrCopy thrblk_g2s = params.tma_load_c.get_slice(Int<0>{});
Tensor bGS_gC = thrblk_g2s.partition_S(gC_epi); // (G2S,G2S_M,G2S_N,EPI_M,EPI_N)
Tensor bGS_sC = thrblk_g2s.partition_D(sC_epi); // (G2S,G2S_M,G2S_N,PIPE_C)
// Get the fusion callbacks for the producer load warp
auto pld_args = cutlass::epilogue::fusion::detail::ProducerLoadArgs{
problem_shape_mnkl,
CtaTileMNK{},
tile_coord_mnkl,
tiled_mma,
EpilogueTile{},
thread_idx
};
auto pld_callbacks = fusion_callbacks.get_producer_load_callbacks(pld_args);
bool is_C_load_needed = is_source_supported && fusion_callbacks.is_C_load_needed();
// Predication for TMA load (one thread issues TMA load)
bool issue_tma_load = cute::elect_one_sync();
// Acquire the lock for the first stage
uint64_t* tma_barrier = load_pipeline.producer_get_barrier(load_pipe_producer_state);
load_pipeline.producer_acquire(load_pipe_producer_state);
// Pre-loop fusion callback entry point
pld_callbacks.begin(tma_barrier, load_pipe_producer_state.count(), issue_tma_load);
auto prior_state = load_pipe_producer_state;
CUTLASS_PRAGMA_UNROLL
for (int epi_n = 0; epi_n < size<3>(gC_epi); ++epi_n) {
CUTLASS_PRAGMA_UNROLL
for (int epi_m = 0; epi_m < size<2>(gC_epi); ++epi_m) {
if (subtile_idx != -1 && (epi_n * static_cast<int>(size<2>(gC_epi)) + epi_m) != subtile_idx) {
continue;
}
// Acquire the lock for this stage
constexpr uint16_t mcast_mask = 0;
uint64_t* tma_barrier = load_pipeline.producer_get_barrier(load_pipe_producer_state);
load_pipeline.producer_acquire(load_pipe_producer_state);
// Loop fusion callback entry point
pld_callbacks.step(tma_barrier, epi_m, epi_n, load_pipe_producer_state.count(), issue_tma_load);
// Execute the TMA load for C if needed
if (issue_tma_load && is_C_load_needed) {
copy(params.tma_load_c.with(load_tensormap, *tma_barrier, mcast_mask),
bGS_gC(_,_,_,epi_m,epi_n), bGS_sC(_,_,_,load_pipe_producer_state.index()));
load_pipeline.producer_expect_transaction(load_pipe_producer_state);
}
// Commit TMA loads for this stage and release the lock
load_pipeline.producer_commit(load_pipe_producer_state);
prior_state = load_pipe_producer_state;
++load_pipe_producer_state;
}
}
// Post-loop fusion callback entry point
pld_callbacks.end();
if (not return_prior_state) {
return load_pipe_producer_state;
} else {
return prior_state;
}
}
CUTLASS_DEVICE auto
load_tail(
LoadPipeline load_pipeline,
LoadPipelineState load_pipe_producer_state) {
bool issue_tma_load = cute::elect_one_sync();
if (issue_tma_load) {
load_pipeline.producer_tail(load_pipe_producer_state);
}
return load_pipe_producer_state;
}
template<
class ProblemShapeMNKL,
class TileShapeMNK,
class TileCoordMNKL,
class AccEngine, class AccLayout,
class TiledMma,
class TensorMapD
>
CUTLASS_DEVICE auto
store(
LoadPipeline load_pipeline,
LoadPipelineState load_pipe_consumer_state,
StorePipeline store_pipeline,
StorePipelineState store_pipe_producer_state,
ProblemShapeMNKL problem_shape_mnkl,
TileShapeMNK tile_shape_MNK,
TileCoordMNKL tile_coord_mnkl,
cute::Tensor<AccEngine,AccLayout> accumulators,
TiledMma tiled_mma,
int thread_idx,
TensorStorage& shared_tensors,
TensorMapD const& store_tensormap,
int subtile_idx=-1) {
using namespace cute;
using ElementAccumulator = typename AccEngine::value_type;
using ElementCompute_ = typename epilogue::fusion::FusionCallbacksTraits<FusionCallbacks>::ElementCompute;
using ElementCompute = cute::conditional_t<cute::is_void_v<ElementCompute_>,ElementAccumulator,ElementCompute_>;
static_assert(is_rmem<AccEngine>::value, "Accumulator must be RF resident.");
static_assert(rank(AccLayout{}) == 3, "Accumulator must be MMA-partitioned: (MMA,MMA_M,MMA_N)");
static_assert(rank(ProblemShapeMNKL{}) == 4, "ProblemShapeMNKL must be rank 4");
static_assert(is_static<TileShapeMNK>::value, "TileShapeMNK must be static");
static_assert(rank(TileShapeMNK{}) == 3, "TileShapeMNK must be rank 3");
static_assert(rank(TileCoordMNKL{}) == 4, "TileCoordMNKL must be rank 4");
// Indexing variables
auto [M, N, K, L] = problem_shape_mnkl;
auto [m_coord, n_coord, k_coord, l_coord] = tile_coord_mnkl;
static_assert(!is_im2col_D, "Do not support im2col");
auto coord_shape = append<3>(make_shape(m_coord, n_coord), Int<0>{});
// Represent the full output tensor, slice to get the tile this CTA is responsible for
Tensor mD_mn = params.tma_store_d.get_tma_tensor(append<3>(make_shape(M,N), Int<1>{})); // (M,N,L)
Tensor mD = coalesce(mD_mn, take<0,2>(CtaTileMNK{}));
Tensor gD = local_tile(mD, take<0,2>(CtaTileMNK{}), coord_shape); // (CTA_M,CTA_N)
// Apply epilogue subtiling
Tensor gD_epi = flat_divide(gD, EpilogueTile{}); // (EPI_TILE_M,EPI_TILE_N,EPI_M,EPI_N)
// Construct the corresponding pipelined smem tensors
auto ptr_sC = shared_tensors.collective.smem_C.begin();
auto ptr_sD = shared_tensors.collective.smem_D.begin();
Tensor sC_epi = cute::as_position_independent_swizzle_tensor(
make_tensor(make_smem_ptr(ptr_sC), SmemLayoutC{})); // (EPI_TILE_M,EPI_TILE_N,PIPE_C)
Tensor sD_epi = cute::as_position_independent_swizzle_tensor(
make_tensor(make_smem_ptr(ptr_sD), SmemLayoutD{})); // (EPI_TILE_M,EPI_TILE_N,PIPE_D)
TiledCopy tiled_copy_C_atom = make_tiled_copy_C_atom(CopyAtomC{}, tiled_mma);
// (t)hread-partition for (r)egister to (s)mem copy (tRS_)
TiledCopy tiled_r2s = make_tiled_copy_S(Copy_Atom<CopyOpR2S,SmemElementD>{}, tiled_copy_C_atom);
ThrCopy thread_r2s = tiled_r2s.get_slice(thread_idx);
Tensor tRS_rAcc = thread_r2s.retile_S(accumulators); // ((R2S,R2S_V),MMA_M,MMA_N)
Tensor tRS_sD = thread_r2s.partition_D(sD_epi); // (R2S,R2S_M,R2S_N,PIPE_D)
auto mma_tile_m = size<0>(TileShapeMNK{}) / size<1>(tRS_rAcc);
auto mma_tile_n = size<1>(TileShapeMNK{}) / size<2>(tRS_rAcc);
auto epi_tile_m = size<0>(EpilogueTile{});
auto epi_tile_n = size<1>(EpilogueTile{});
// Allocate D registers
Layout tRS_rD_layout = make_layout(take<0,3>(shape(thread_r2s.partition_S(sD_epi))));
Tensor tRS_rD = make_tensor<SmemElementD>(tRS_rD_layout); // (R2S,R2S_M,R2S_N)
// Vectorized fragment view
constexpr int FragmentSize = DispatchPolicy::FragmentSize;
Tensor tRS_rAcc_frg = recast<Array<ElementAccumulator, FragmentSize>>(tRS_rAcc);
Tensor tRS_rD_frg = recast<Array<SmemElementD , FragmentSize>>(tRS_rD);
CUTE_STATIC_ASSERT(size<0>(tRS_rAcc) % FragmentSize == 0, "Fragment size does not vectorize properly");
// (t)hread-partition for (s)mem to (r)egister copy (tSR_)
TiledCopy tiled_s2r = make_tiled_copy_S(Copy_Atom<CopyOpS2R, SmemElementC>{}, tiled_copy_C_atom);
ThrCopy thread_s2r = tiled_s2r.get_slice(thread_idx);
Tensor tSR_sC = thread_s2r.partition_S(sC_epi); // (S2R,S2R_M,S2R_N,PIPE_C)
Layout tSR_rC_layout = thread_s2r.retile_D(tRS_rD).layout(); // (S2R,S2R_M,S2R_N)
// Allocate C registers
// If C smem load is a non-vectorized dst(i) = src(i) then we can allocate C registers directly in the compute type
// to eliminate some redundant pack+unpack instruction sequences for sub-word types
constexpr bool IsDirectS2R = cute::is_same_v<CopyOpS2R, AutoVectorizingCopyWithAssumedAlignment<128>>
&& decltype(max_common_vector(tSR_rC_layout, tSR_sC.layout()))::value <= 1;
using RegisterElementC = cute::conditional_t<IsDirectS2R, ElementCompute, SmemElementC>;
Tensor tRS_rC = make_tensor<RegisterElementC>(tRS_rD_layout); // (R2S,R2S_M,R2S_N)
Tensor tSR_rC = thread_s2r.retile_D(tRS_rC); // (S2R,S2R_M,S2R_N)
// thread(b)lock-partition for (s)mem to (g)mem copy (bSG_)
ThrCopy thrblk_s2g = params.tma_store_d.get_slice(Int<0>{});
Tensor bSG_sD = thrblk_s2g.partition_S(sD_epi); // (S2G,S2G_M,S2G_N,PIPE_D)
Tensor bSG_gD = thrblk_s2g.partition_D(gD_epi); // (S2G,S2G_M,S2G_N,EPI_M,EPI_N)
// OOB predication for tile quantization "residue"
// Absolute coordinate tensors (dynamic)
Tensor mD_crd = make_identity_tensor(make_shape(M,N)); // (M,N)
Tensor cD_mn = local_tile(mD_crd, take<0,2>(CtaTileMNK{}), make_coord(m_coord, n_coord)); // (CTA_M,CTA_N)
Tensor tRS_cD_mn = thread_r2s.partition_S(flat_divide(cD_mn, EpilogueTile{})); // (R2S,R2S_M,R2S_N,EPI_M,EPI_N)
// Relative coordinate tensors (static)
Tensor cD = make_counting_tensor(cD_mn.layout()); // (CTA_M,CTA_N)
Tensor tRS_cD = make_counting_tensor(tRS_cD_mn.layout()); // (R2S,R2S_M,R2S_N,EPI_M,EPI_N)
// Subtract the global "bottom right" corner from the local "top left" corner to get the max relative coordinate
auto residue_cD = make_coord(M,N) - cD_mn(_0{}); // (m,n)
auto residue_tRS_cD = make_coord(M,N) - tRS_cD_mn(_0{}); // (m,n)
CUTE_STATIC_ASSERT(epi_tile_m % mma_tile_m == 0, "MMA_TILE_M must divide EPI_TILE_M");
CUTE_STATIC_ASSERT(mma_tile_n % epi_tile_n == 0, "EPI_TILE_N must divide MMA_TILE_N");
// Get the fusion callbacks for the consumer store warps
constexpr bool RefSrc = true; // Register tensors reference R2S copy src layout
auto cst_args = cutlass::epilogue::fusion::detail::ConsumerStoreArgs{
problem_shape_mnkl,
CtaTileMNK{},
tile_coord_mnkl,
tiled_mma,
EpilogueTile{},
tiled_r2s,
cD,
residue_cD,
tRS_cD,
residue_tRS_cD,
tRS_rC,
thread_idx
};
auto cst_callbacks = fusion_callbacks.get_consumer_store_callbacks<RefSrc>(cst_args);
bool is_producer_load_needed = fusion_callbacks.is_producer_load_needed();
bool is_C_load_needed = is_source_supported && fusion_callbacks.is_C_load_needed();
// Thread synchronizer for previously issued waits or fences
// to ensure visibility of smem reads/writes to threads or TMA unit
auto synchronize = [&] () { cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::EpilogueBarrier); };
// Predication for TMA store (one warp issues TMA store)
bool issue_tma_store = (thread_idx / NumThreadsPerWarp) == 0;
// In the reuse smem configuration we have StagesC smem buffers and at most StagesD committed TMA stores in flight.
    // The TMA store pipeline's producer acquire returns once at most StagesD-1 committed stores remain in flight,
    // so store completion is only guaranteed after StagesD iterations, at which point we can begin releasing the smem buffer locks.
// store_pipe_producer_state tracks the acquire and load_pipe_consumer_state tracks the release, in circular buffer fashion.
LoadPipelineState load_wait_state = load_pipe_consumer_state;
if constexpr (ReuseSmemC) {
load_wait_state = store_pipe_producer_state;
load_wait_state.phase_ ^= 1;
}
// We can delay issue of TMA store by one iteration to achieve better interleaving of non-TMA instructions
// Sync requirements of smem reuse may preclude this optimization
// Delayed stores cause delayed stage releases which causes deadlock when StagesC == StagesD
int epi_m_prev = 0, epi_n_prev = 0;
static_assert(not (DelayTmaStore and ReuseSmemC and StagesC == StagesD), "This TMA epilogue configuration will deadlock");
// The TMA store sequence for one subtile iteration
auto tma_store_fn = [&] (int epi_m, int epi_n) {
// Write the tile from smem to gmem with TMA
cutlass::arch::fence_view_async_shared(); // ensure smem writes are visible to TMA
synchronize(); // ensure all threads have issued their async fence
if constexpr (is_destination_supported) {
if (issue_tma_store) {
copy(params.tma_store_d.with(store_tensormap), bSG_sD(_,_,_,store_pipe_producer_state.index()), bSG_gD(_,_,_,epi_m,epi_n));
}
}
// Post async fence, pre TMA commit callback entry point
cst_callbacks.tma_store(epi_m, epi_n, store_pipe_producer_state.count(), issue_tma_store);
// Commit the TMA stores for this stage
if (issue_tma_store) {
store_pipeline.producer_commit(store_pipe_producer_state);
}
++store_pipe_producer_state;
++issued_stores;
// Wait for the next smem buffer to be available
if (issue_tma_store) {
store_pipeline.producer_acquire(store_pipe_producer_state);
}
synchronize();
if constexpr (ReuseSmemC) {
// producer_acquire returns when at most StagesD-1 committed stores are pending
bool store_finished = issued_stores > StorePipeline::UnacquiredStages;
// Let dma warp know earliest smem buffer is consumed and empty after StagesD producer commits
if (store_finished) {
if (is_producer_load_needed) {
load_pipeline.consumer_release(load_pipe_consumer_state);
}
++load_pipe_consumer_state;
}
}
};
//
// BEGIN EPILOGUE
//
// Pre-loop fusion callback entry point
cst_callbacks.begin();
// For each output tile
CUTLASS_PRAGMA_UNROLL
for (int epi_n = 0; epi_n < size<3>(gD_epi); ++epi_n) {
CUTLASS_PRAGMA_UNROLL
for (int epi_m = 0; epi_m < size<2>(gD_epi); ++epi_m) {
bool is_first_iteration = epi_m == 0 && epi_n == 0;
bool is_last_iteration = epi_m == size<2>(gD_epi)-1 && epi_n == size<3>(gD_epi)-1;
if (subtile_idx != -1 && (epi_n * static_cast<int>(size<2>(gD_epi)) + epi_m) != subtile_idx) {
continue;
}
cst_callbacks.begin_loop(epi_m, epi_n);
if (is_producer_load_needed) {
// Wait for the producer load to fill smem
load_pipeline.consumer_wait(load_wait_state);
if (is_C_load_needed) {
// Copy source tile from smem to register
copy(tiled_s2r, tSR_sC(_,_,_,load_wait_state.index()), tSR_rC);
}
}
// First loop fusion callback entry point
cst_callbacks.previsit(epi_m, epi_n, load_wait_state.count(), is_producer_load_needed);
if (is_producer_load_needed) {
if constexpr (not ReuseSmemC) {
// Let producer load warp know smem buffers are consumed and empty
cutlass::arch::fence_view_async_shared();
load_pipeline.consumer_release(load_pipe_consumer_state);
++load_pipe_consumer_state;
}
++load_wait_state;
}
int mma_m = epi_m;
int mma_n = (epi_n * size<1>(EpilogueTile{})) / mma_tile_n;
Tensor tRS_rAcc_frg_mn = tRS_rAcc_frg(_,mma_m,mma_n);
// Vectorized fragment loop with visitor callback entry point
int epi_n_in_mma = epi_n % (mma_tile_n / epi_tile_n);
int r2s_v = epi_n_in_mma * size(tRS_rD_frg);
CUTLASS_PRAGMA_UNROLL
for (int epi_v = 0; epi_v < size(tRS_rD_frg); ++epi_v) {
tRS_rD_frg(epi_v) = cst_callbacks.visit(tRS_rAcc_frg_mn(r2s_v + epi_v), epi_v, epi_m, epi_n);
}
// The latest we can delay the TMA store is right before the smem store of the next iteration
// since the current TMA store needs to be committed before we can acquire the next smem buffer
if constexpr (DelayTmaStore) {
// Issue TMA stores for the previous subtile
if (not is_first_iteration and subtile_idx == -1) {
tma_store_fn(epi_m_prev, epi_n_prev);
}
epi_m_prev = epi_m;
epi_n_prev = epi_n;
}
// Smem reduction callback entry point using current store buffer for workspace
cst_callbacks.reduce(sD_epi(_,_,store_pipe_producer_state.index()),
synchronize, epi_m, epi_n, is_last_iteration, tRS_rD_frg);
// Copy tile from register to smem
if constexpr (is_destination_supported) {
copy(tiled_r2s, tRS_rD, tRS_sD(_,_,_,store_pipe_producer_state.index()));
}
// Post reduction, pre TMA store callback entry point
constexpr bool issue_smem_store = true; // No smem store predication
cst_callbacks.postreduce(epi_m, epi_n, store_pipe_producer_state.count(), issue_smem_store);
if constexpr (not DelayTmaStore) {
// Issue TMA stores for this subtile
tma_store_fn(epi_m, epi_n);
}
cst_callbacks.end_loop(epi_m, epi_n);
} // for epi_m
} // for epi_n
if constexpr (DelayTmaStore) {
// Issue TMA stores for the last subtile
tma_store_fn(epi_m_prev, epi_n_prev);
}
// Post-loop fusion callback entry point
cst_callbacks.end();
return cute::make_tuple(load_pipe_consumer_state, store_pipe_producer_state);
}
CUTLASS_DEVICE auto
store_tail(
LoadPipeline load_pipeline,
LoadPipelineState load_pipe_consumer_state,
StorePipeline store_pipeline,
StorePipelineState store_pipe_producer_state) {
// wait for all TMA stores to complete
store_pipeline.producer_tail(store_pipe_producer_state);
// reset store counter
issued_stores = 0;
if constexpr (ReuseSmemC) {
if (fusion_callbacks.is_producer_load_needed()) {
// Issue releases on up to StagesD-1 previously issued TMA stores
constexpr int release_stages = cute::min(StorePipeline::UnacquiredStages, get_load_pipe_increment(CtaTileMNK{}));
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < release_stages; ++stage) {
load_pipeline.consumer_release(load_pipe_consumer_state);
++load_pipe_consumer_state;
}
}
}
return cute::make_tuple(load_pipe_consumer_state, store_pipe_producer_state);
}
CUTLASS_DEVICE auto
store_init(Params const& params, int32_t const sm_count, int32_t const sm_idx) const {
    // Initialize tma for storing
constexpr bool IsLoad = false;
auto store_tensormaps = tensormaps_init<IsLoad>(params, sm_count, sm_idx);
return store_tensormaps;
}
//
// Methods to perform different parts of TMA/Tensormap modifications
//
template <bool IsLoad>
CUTLASS_DEVICE auto
tensormaps_init(Params const& params, int32_t const sm_count, int32_t const sm_idx) const {
cute::TmaDescriptor* tma_desc = nullptr;
cute::TmaDescriptor* gmem_tensormap = params.tensormaps;
if constexpr (IsLoad) {
if (not cute::is_void_v<ElementC>) {
tma_desc = &gmem_tensormap[sm_idx];
if (cute::elect_one_sync()) {
// Bringing tensormaps from params to gmem for modification later
Tensor pC_tensormap = make_tensor(params.tma_load_c.get_tma_descriptor(), Int<1>{}, Int<1>{});
Tensor gC_tensormap = make_tensor(tma_desc, Int<1>{}, Int<1>{});
copy(recast<uint128_t>(pC_tensormap), recast<uint128_t>(gC_tensormap));
}
}
} else {
int const offset_Ddesc = cute::is_void_v<ElementC> ? 0 : sm_count;
tma_desc = &gmem_tensormap[sm_idx + offset_Ddesc];
if (cute::elect_one_sync()) {
// Bringing tensormaps from params to gmem for modification later
Tensor pD_tensormap = make_tensor(params.tma_store_d.get_tma_descriptor(), Int<1>{}, Int<1>{});
Tensor gD_tensormap = make_tensor(tma_desc, Int<1>{}, Int<1>{});
copy(recast<uint128_t>(pD_tensormap), recast<uint128_t>(gD_tensormap));
}
}
return cute::make_tuple(tma_desc);
}
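  // Layout of the gmem tensormap workspace used above (illustrative summary): when ElementC is
  // non-void, descriptors [0, sm_count) hold each SM's copy for C and descriptors
  // [sm_count, 2*sm_count) hold each SM's copy for D; with a void C only the D copies exist.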
// Bringing tensormaps to smem (to be done by single thread)
template <bool IsLoad>
CUTLASS_DEVICE
void
tensormaps_fetch_to_smem(
TensorMapStorage& shared_tensormap,
cute::TmaDescriptor const* tensormap) const {
if constexpr (IsLoad) {
if (not cute::is_void_v<ElementC>) {
Tensor gC_tensormap = make_tensor(make_gmem_ptr(tensormap), Int<1>{}, Int<1>{});
Tensor sC_tensormap = make_tensor(make_smem_ptr(&shared_tensormap.smem_tensormap_C), Int<1>{}, Int<1>{});
copy(recast<uint128_t>(gC_tensormap), recast<uint128_t>(sC_tensormap));
}
} else {
Tensor gD_tensormap = make_tensor(make_gmem_ptr(tensormap), Int<1>{}, Int<1>{});
Tensor sD_tensormap = make_tensor(make_smem_ptr(&shared_tensormap.smem_tensormap_D), Int<1>{}, Int<1>{});
copy(recast<uint128_t>(gD_tensormap), recast<uint128_t>(sD_tensormap));
}
cp_async_fence();
cp_async_wait<0>();
}
// Replace address for the global tensor (to be done by single thread)
template <bool IsLoad>
CUTLASS_DEVICE
void
tensormaps_replace_global_address(
TensorMapStorage& shared_tensormap,
Params const& params,
int32_t next_batch) {
// Replacing global_address for the next batch
if constexpr (IsLoad) {
if (not cute::is_void_v<ElementC>) {
cute::tma_descriptor_replace_addr_in_shared_mem(shared_tensormap.smem_tensormap_C,
params.ptr_C[next_batch]);
}
} else {
cute::tma_descriptor_replace_addr_in_shared_mem(shared_tensormap.smem_tensormap_D,
params.ptr_D[next_batch]);
}
}
template <bool IsLoad>
CUTLASS_DEVICE
void
tensormaps_perform_update(
TensorMapStorage& shared_tensormap,
Params const& params,
cute::TmaDescriptor const* tensormap,
int32_t next_batch) {
if (cute::elect_one_sync()) {
// Bringing tensormaps to smem
tensormaps_fetch_to_smem<IsLoad>(shared_tensormap, tensormap);
// Replacing global_address for the next batch
tensormaps_replace_global_address<IsLoad>(shared_tensormap, params, next_batch);
}
}
template <bool IsLoad>
CUTLASS_DEVICE
void
tensormaps_cp_fence_release(
TensorMapStorage& shared_tensormap,
cute::TmaDescriptor const* tensormap,
[[maybe_unused]] uint32_t lane_predicate) {
    // The entire warp must execute this (i.e., it is warp-aligned)
if constexpr (IsLoad) {
if (not cute::is_void_v<ElementC>) {
tma_descriptor_cp_fence_release(tensormap, shared_tensormap.smem_tensormap_C);
}
} else {
tma_descriptor_cp_fence_release(tensormap, shared_tensormap.smem_tensormap_D);
}
}
template <bool IsLoad>
CUTLASS_DEVICE
void
tensormaps_fence_acquire(cute::TmaDescriptor const* tensormap) {
if constexpr (IsLoad) {
if (not cute::is_void_v<ElementC>) {
cute::tma_descriptor_fence_acquire(tensormap);
}
} else {
cute::tma_descriptor_fence_acquire(tensormap);
}
}
private:
Params const& params;
FusionCallbacks fusion_callbacks;
int issued_stores = 0;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace collective
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/collective/sm90_epilogue_array_tma_warpspecialized.hpp/0 | {
"file_path": "include/cutlass/epilogue/collective/sm90_epilogue_array_tma_warpspecialized.hpp",
"repo_id": "include",
"token_count": 19075
} | 23 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination operations used by epilogues.
*/
#pragma once
#include <cuda_fp16.h>
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/activation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element, int ElementsPerAccess>
struct ArrayMaximum {
CUTLASS_HOST_DEVICE
Array<Element, ElementsPerAccess> operator()(
Array<Element, ElementsPerAccess> const &lhs,
Array<Element, ElementsPerAccess> const &rhs) const {
Array<Element, ElementsPerAccess> result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
result[i] = platform::max(lhs[i].get(), rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<Element, ElementsPerAccess> operator()(
Array<Element, ElementsPerAccess> const &lhs,
Element rhs) const {
Array<Element, ElementsPerAccess> result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
result[i] = platform::max(lhs[i].get(), rhs);
}
return result;
}
};
/// Partial specialization: Element=float
template <int ElementsPerAccess>
struct ArrayMaximum<float, ElementsPerAccess> {
CUTLASS_HOST_DEVICE
Array<float, ElementsPerAccess> operator()(
Array<float, ElementsPerAccess> const &lhs,
Array<float, ElementsPerAccess> const &rhs) const {
Array<float, ElementsPerAccess> result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
result[i] = fmax(lhs[i], rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<float, ElementsPerAccess> operator()(
Array<float, ElementsPerAccess> const &lhs,
float rhs) const {
Array<float, ElementsPerAccess> result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
result[i] = fmax(lhs[i], rhs);
}
return result;
}
};
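// Usage sketch (illustrative, not part of the original header): calling ArrayMaximum with a
// scalar rhs of zero clamps every fragment element from below, i.e. an elementwise ReLU.
//
//   detail::ArrayMaximum<float, 4> max_op;
//   Array<float, 4> clamped = max_op(frag, 0.0f);   // clamped[i] = fmax(frag[i], 0.0f)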
/// Partial specialization: Element=half
template <int ElementsPerAccess>
struct ArrayMaximum<half_t, ElementsPerAccess> {
CUTLASS_DEVICE
Array<half_t, ElementsPerAccess> operator()(
Array<half_t, ElementsPerAccess> const &lhs,
Array<half_t, ElementsPerAccess> const &rhs) const {
Array<half_t, ElementsPerAccess> result;
#if __CUDA_ARCH__ >= 800
int const kVectorCount = ElementsPerAccess / 2;
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(lhs.raw_data());
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(rhs.raw_data());
__half2 *res_ptr = reinterpret_cast<__half2 *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kVectorCount; ++i) {
res_ptr[i] = __hmax2(lhs_ptr[i], rhs_ptr[i]);
}
static_assert(!(ElementsPerAccess % 2), "Output array must be divisible by vector length.");
#else
__half const *lhs_ptr = reinterpret_cast<__half const *>(lhs.raw_data());
__half const *rhs_ptr = reinterpret_cast<__half const *>(rhs.raw_data());
__half *res_ptr = reinterpret_cast<__half *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
res_ptr[i] = ((lhs_ptr[i] < rhs_ptr[i]) ? rhs_ptr[i] : lhs_ptr[i]);
}
#endif
return result;
}
CUTLASS_DEVICE
Array<half_t, ElementsPerAccess> operator()(
Array<half_t, ElementsPerAccess> const &lhs,
half_t const &rhs) const {
Array<half_t, ElementsPerAccess> result;
#if __CUDA_ARCH__ >= 800
int const kVectorCount = ElementsPerAccess / 2;
__half rhs_raw = reinterpret_cast<__half const &>(rhs);
__half2 rhs_pair = __half2half2(rhs_raw);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(lhs.raw_data());
__half2 *res_ptr = reinterpret_cast<__half2 *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kVectorCount; ++i) {
res_ptr[i] = __hmax2(lhs_ptr[i], rhs_pair);
}
static_assert(!(ElementsPerAccess % 2), "Output array must be divisible by vector length.");
#else
__half const *lhs_ptr = reinterpret_cast<__half const *>(lhs.raw_data());
__half const rhs_raw = reinterpret_cast<__half const &>(rhs);
__half *res_ptr = reinterpret_cast<__half *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
res_ptr[i] = ((lhs_ptr[i] < rhs_raw) ? rhs_raw : lhs_ptr[i]);
}
#endif
return result;
}
};
/// Partial specialization: Element=bfloat16_t
template <int ElementsPerAccess>
struct ArrayMaximum<bfloat16_t, ElementsPerAccess> {
using NvType = __nv_bfloat16;
using NvTypeV2 = __nv_bfloat162;
CUTLASS_DEVICE
Array<bfloat16_t, ElementsPerAccess> operator()(
Array<bfloat16_t, ElementsPerAccess> const &lhs,
Array<bfloat16_t, ElementsPerAccess> const &rhs) const {
Array<bfloat16_t, ElementsPerAccess> result;
#if __CUDA_ARCH__ >= 800
int const kVectorCount = ElementsPerAccess / 2;
NvTypeV2 const *lhs_ptr = reinterpret_cast<NvTypeV2 const *>(lhs.raw_data());
NvTypeV2 const *rhs_ptr = reinterpret_cast<NvTypeV2 const *>(rhs.raw_data());
NvTypeV2 *res_ptr = reinterpret_cast<NvTypeV2 *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kVectorCount; ++i) {
res_ptr[i] = __hmax2(lhs_ptr[i], rhs_ptr[i]);
}
#else
NvType const *lhs_ptr = reinterpret_cast<NvType const *>(lhs.raw_data());
NvType const *rhs_ptr = reinterpret_cast<NvType const *>(rhs.raw_data());
NvType *res_ptr = reinterpret_cast<NvType *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
res_ptr[i] = ((lhs_ptr[i] < rhs_ptr[i]) ? rhs_ptr[i] : lhs_ptr[i]);
}
#endif
return result;
}
CUTLASS_DEVICE
Array<bfloat16_t, ElementsPerAccess> operator()(
Array<bfloat16_t, ElementsPerAccess> const &lhs,
bfloat16_t rhs) const {
Array<bfloat16_t, ElementsPerAccess> result;
#if __CUDA_ARCH__ >= 800
int const kVectorCount = ElementsPerAccess / 2;
NvType rhs_raw = reinterpret_cast<NvType const &>(rhs);
NvTypeV2 rhs_pair = __bfloat162bfloat162(rhs_raw);
NvTypeV2 const *lhs_ptr = reinterpret_cast<NvTypeV2 const *>(lhs.raw_data());
NvTypeV2 *res_ptr = reinterpret_cast<NvTypeV2 *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kVectorCount; ++i) {
res_ptr[i] = __hmax2(lhs_ptr[i], rhs_pair);
}
static_assert(!(ElementsPerAccess % 2), "Output array must be divisible by vector length.");
#else
NvType const *lhs_ptr = reinterpret_cast<NvType const *>(lhs.raw_data());
NvType const rhs_raw = reinterpret_cast<NvType const &>(rhs);
NvType *res_ptr = reinterpret_cast<NvType *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
res_ptr[i] = ((lhs_ptr[i] < rhs_raw) ? rhs_raw : lhs_ptr[i]);
}
#endif
return result;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, int ElementsPerAccess>
struct ReluConditional {
CUTLASS_HOST_DEVICE
void operator()(
bool conditional[],
Array<Element, ElementsPerAccess> const &fragment,
Element threshold) const {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
conditional[i] = !(fragment[i] < threshold);
}
}
};
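// Illustrative reading of the predicate above: conditional[i] is true exactly when
// fragment[i] >= threshold (and also whenever either operand is NaN, since the < comparison
// then fails); these bits are what LinearCombinationBiasRelu later packs into the 'T' tensor.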
template <int ElementsPerAccess>
struct ReluConditional<half_t, ElementsPerAccess> {
CUTLASS_DEVICE
void operator()(
bool conditional[],
Array<half_t, ElementsPerAccess> const &fragment,
half_t threshold) const {
__half y = reinterpret_cast<__half const &>(threshold);
__half const *x = reinterpret_cast<__half const *>(fragment.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
conditional[i] = !__hlt(x[i], y);
}
}
};
template <int ElementsPerAccess>
struct ReluConditional<bfloat16_t, ElementsPerAccess> {
CUTLASS_DEVICE
void operator()(
bool conditional[],
Array<bfloat16_t, ElementsPerAccess> const &fragment,
bfloat16_t threshold) const {
__nv_bfloat16 y = reinterpret_cast<__nv_bfloat16 const &>(threshold);
__nv_bfloat16 const *x = reinterpret_cast<__nv_bfloat16 const *>(fragment.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
conditional[i] = !__hlt(x[i], y);
}
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This is a partial specialization for fused Bias and ReLU. It supports the option of packing
/// ReLU conditionals in a bit vector that may be used by backwards passes as an optimization.
///
/// This class can only be used with cutlass::epilogue::threadblock::EpilogueWithBroadcast<>.
///
/// This base class is meant to define the concept required of the
/// EpilogueWithBroadcast::OutputOp
template <
typename ElementC_,
typename ElementAccumulator_,
typename ElementCompute_,
typename ElementZ_,
int ElementsPerAccess,
bool StoreT_ = true,
typename ElementVector_ = ElementC_
>
class LinearCombinationBiasRelu {
public:
using ElementOutput = ElementC_;
using ElementC = ElementC_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using ElementZ = ElementZ_;
using ElementVector = ElementVector_;
using ElementT = uint1b_t;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kCount = kElementsPerAccess;
using ElementwiseOp = ReLu<ElementCompute>;
using BinaryOp = plus<ElementCompute>;
// Indicates that this epilogue applies only one binary operation
static bool const kIsSingleSource = true;
using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentCompute = Array<ElementCompute, kElementsPerAccess>;
using FragmentC = Array<ElementOutput, kElementsPerAccess>;
using FragmentZ = Array<ElementZ, kElementsPerAccess>;
using FragmentT = Array<ElementT, kElementsPerAccess>;
/// If true, the 'Z' tensor is stored
static bool const kStoreZ = true;
/// If true, the 'T' tensor is stored
static bool const kStoreT = StoreT_;
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
ElementZ threshold; ///< ReLu threshold
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute()),
alpha_ptr(nullptr),
beta_ptr(nullptr),
threshold(ElementCompute()) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta,
ElementCompute threshold_ = ElementCompute()
):
alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) {
NumericConverter<ElementZ, ElementCompute> convert_threshold;
threshold = convert_threshold(threshold_);
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha
): alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr), threshold(ElementZ()) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr,
ElementCompute threshold_ = ElementCompute()
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
NumericConverter<ElementZ, ElementCompute> convert_threshold;
threshold = convert_threshold(threshold_);
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr), threshold(ElementZ()) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
ElementZ threshold_;
public:
//
// Methods
//
/// Constructor from Params
CUTLASS_HOST_DEVICE
LinearCombinationBiasRelu(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
threshold_ = params.threshold;
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
// set to NaN to make ReLU no-op for all except last k partitions
int64_t allones = -1;
threshold_ = reinterpret_cast<ElementZ const &>(allones);
}
}
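  // Note (illustrative): an all-ones bit pattern is a NaN for floating-point ElementZ. Every
  // comparison against NaN is false, so all packed predicates evaluate to true, and the
  // fmax/__hmax2 maximum paths return the non-NaN operand, leaving values unchanged; this is
  // how the ReLU clamp becomes a no-op for all but the final k partition.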
/// Applies the operation when is_source_needed() is true
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentC const &frag_C,
FragmentCompute const &V) const {
BinaryOp binary_op;
FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
FragmentCompute tmp_C = NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(frag_C);
FragmentCompute result_Z;
bool conditions[kElementsPerAccess];
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElementsPerAccess; ++i) {
ElementCompute z = alpha_ * tmp_Accum[i];
z += beta_ * tmp_C[i];
z = binary_op(z, V[i]);
result_Z[i] = z;
}
NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z;
frag_Z = convert_z(result_Z);
//
// Compute condition
//
detail::ReluConditional<ElementZ, kElementsPerAccess> relu_conditional;
relu_conditional(conditions, frag_Z, threshold_);
detail::ArrayMaximum<ElementZ, kElementsPerAccess> maximum_op;
frag_Z = maximum_op(frag_Z, threshold_);
if (kStoreT) {
PackPredicates<kElementsPerAccess> pack_predicates;
frag_T = pack_predicates(conditions);
}
}
/// Applies the operation when is_source_needed() is false
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentCompute const &V) const {
BinaryOp binary_op;
FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
FragmentCompute result_Z;
bool conditions[kElementsPerAccess];
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElementsPerAccess; ++i) {
ElementCompute z = binary_op(alpha_ * tmp_Accum[i], V[i]);
result_Z[i] = z;
}
NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z;
frag_Z = convert_z(result_Z);
//
// Compute condition
//
detail::ReluConditional<ElementZ, kElementsPerAccess> relu_conditional;
relu_conditional(conditions, frag_Z, threshold_);
detail::ArrayMaximum<ElementZ, kElementsPerAccess> maximum_op;
frag_Z = maximum_op(frag_Z, threshold_);
//
// Store
//
if (kStoreT) {
PackPredicates<kElementsPerAccess> pack_predicates;
frag_T = pack_predicates(conditions);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/thread/linear_combination_bias_relu.h/0 | {
"file_path": "include/cutlass/epilogue/thread/linear_combination_bias_relu.h",
"repo_id": "include",
"token_count": 6788
} | 24 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination operation, bias addition, and tensor-tensor
elementwise operations
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/epilogue/thread/detail.hpp"
#include "cutlass/epilogue/thread/scale_type.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
namespace detail {
/// Returns whether a source operand is needed for a combination of binary operation and scale
/// type. Simple specialized checks are made for cases in which 0 is an identity element of
/// the binary operation.
template <class BinaryOp, class ElementCompute, ScaleType::Kind Scale>
CUTLASS_HOST_DEVICE
bool is_binary_op_source_needed(ElementCompute scale) {
if constexpr (cute::is_same_v<BinaryOp, NoOp<ElementCompute>>) {
return false;
}
else if constexpr (cute::is_same_v<BinaryOp, plus<ElementCompute>> || cute::is_same_v<BinaryOp, minus<ElementCompute>>) {
// Cases for binary operators for which 0 is an identity element
if constexpr (Scale == ScaleType::NoBetaScaling) return true;
if constexpr (Scale == ScaleType::OnlyAlphaScaling) return false;
if constexpr (Scale == ScaleType::Nothing) return false;
return scale != ElementCompute(0);
}
return true;
}
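// Example (illustrative): with BinaryOp = plus<float> and Scale == ScaleType::Default the
// source operand is needed only when the runtime scale (beta) is nonzero, whereas with
// BinaryOp = NoOp it is never needed, regardless of beta.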
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/** Compute a tensor-tensor broadcast epilogue.
*
* @param ElementOutput_ Data type used to load and store tensors
* @param ElementAccumulator_ Accumulator data type
* @param ElementCompute_ Data type used to compute linear combination
* @param ElementBias_ Data type of Bias elements
* @param ActivationFunctor_ Fused Activation
* @param BinaryOp0_ Binary operation to perform on O0 and C0. detail::NoOp means no operation
* @param BinaryOp1_ Binary operation to perform on O1 and C1. detail::NoOp means no operation
* @param UnaryOp_ Unary operation to perform on final result
* @param Scale Controls the type of Alpha and Beta scaling to perform
* @param Round How values should be rounded in conversions
* @param ElementSource_ Data type used for source operands
*
* Computes the following:
* O0 = alpha * accumulator + bias
* O1 = BinaryOp0(O0, beta * C0)
* O2 = BinaryOp1(O1, beta * C1)
* D = UnaryOp(O2)
*/
template <
class ElementOutput_,
class ElementAccumulator_ = ElementOutput_,
class ElementCompute_ = ElementOutput_,
class ElementBias_ = ElementCompute_,
template <class T> class ActivationFunctor_ = Identity,
template <class T> class BinaryOp0_ = plus,
template <class T> class BinaryOp1_ = detail::NoOp,
template <class T> class UnaryOp_ = Identity,
ScaleType::Kind Scale = ScaleType::Default,
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest,
class ElementSource_ = ElementOutput_
>
class LinearCombinationTensorBroadcast {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using ElementScalar = ElementCompute;
using ElementBias = ElementBias_;
using ElementC = ElementSource_;
using ElementD = ElementOutput_;
using ElementScalingFactor = ElementAccumulator_;
using UnaryOp = UnaryOp_<ElementCompute>;
using BinaryOp0 = BinaryOp0_<ElementCompute>;
using BinaryOp1 = BinaryOp1_<ElementCompute>;
using ActivationFunctor = ActivationFunctor_<ElementCompute>;
static constexpr int kCount = 1;
static constexpr ScaleType::Kind kScale = Scale;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using ComputeFragment = Array<ElementCompute, kCount>;
using FragmentBias = Array<ElementBias, kCount>;
static constexpr FloatRoundStyle kRound = Round;
using NoOpType = detail::NoOp<ElementCompute>;
static constexpr bool IsBinaryOp0Enabled = !cute::is_same_v<BinaryOp0, NoOpType>;
static constexpr bool IsBinaryOp1Enabled = !cute::is_same_v<BinaryOp1, NoOpType>;
static constexpr bool IsUnaryOpEnabled = !cute::is_same_v<UnaryOp, NoOpType> && !cute::is_same_v<UnaryOp, Identity<ElementCompute>>;
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha{}; ///< scales accumulators
ElementCompute beta{}; ///< scales source tensor
ElementCompute const* alpha_ptr = nullptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const* beta_ptr = nullptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
Params() = default;
CUTLASS_HOST_DEVICE
Params(ElementCompute const* alpha_ptr, ElementCompute const* beta_ptr)
: alpha_ptr(alpha_ptr),
beta_ptr(beta_ptr) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute const* alpha_ptr)
: alpha_ptr(alpha_ptr) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute alpha,
ElementCompute beta)
: alpha(alpha),
beta(beta) {}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationTensorBroadcast(Params const& params)
: alpha_(params.alpha_ptr ? *params.alpha_ptr : params.alpha),
beta_(params.beta_ptr ? *params.beta_ptr : params.beta) {}
/// Returns true if source 0 is needed
CUTLASS_HOST_DEVICE
bool is_source0_needed() const {
return detail::is_binary_op_source_needed<BinaryOp0, ElementCompute, Scale>(beta_);
}
/// Returns true if source 1 is needed
CUTLASS_HOST_DEVICE
bool is_source1_needed() const {
return detail::is_binary_op_source_needed<BinaryOp1, ElementCompute, Scale>(beta_);
}
//
// Specialization for scalar
//
CUTLASS_HOST_DEVICE
ElementD operator()(ElementAccumulator const accumulator, ElementC const source0, ElementC source1, ElementBias const bias) {
// Convert everything to Compute type, do compute, and then store to output type
NumericConverter<ElementCompute, ElementAccumulator, Round> accumulator_converter;
NumericConverter<ElementCompute, ElementBias, Round> bias_converter;
NumericConverter<ElementCompute, ElementC, Round> source_converter;
NumericConverter<ElementD, ElementCompute, Round> destination_converter;
ActivationFunctor act;
multiplies<ElementCompute> mul;
multiply_add<ElementCompute> madd;
ElementCompute intermediate = accumulator_converter(accumulator);
intermediate = madd(alpha_, intermediate, bias_converter(bias));
intermediate = act(intermediate);
// Apply BinaryOp0, if needed
if constexpr (IsBinaryOp0Enabled) {
BinaryOp0 bin0;
ElementCompute converted_source = source_converter(source0);
intermediate = bin0(intermediate, mul(beta_, converted_source));
}
// Apply BinaryOp1, if needed
if constexpr (IsBinaryOp1Enabled) {
BinaryOp1 bin1;
ElementCompute converted_source = source_converter(source1);
intermediate = bin1(intermediate, mul(beta_, converted_source));
}
// Apply UnaryOp, if needed
if constexpr (IsUnaryOpEnabled) {
UnaryOp unary;
intermediate = unary(intermediate);
}
return destination_converter(intermediate);
}
};
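// Illustrative instantiation (a sketch; the concrete types below are assumptions, not taken
// from the original header). With the default functors (ActivationFunctor = Identity,
// BinaryOp0 = plus, BinaryOp1 = NoOp, UnaryOp = Identity) the scalar operator() above reduces to
//
//   D = alpha * accumulator + bias + beta * C0
//
// and source1 is ignored:
//
//   using Epilogue = cutlass::epilogue::thread::LinearCombinationTensorBroadcast<
//       cutlass::half_t,   // ElementOutput
//       float,             // ElementAccumulator
//       float,             // ElementCompute
//       float>;            // ElementBias
//   Epilogue::Params params(/*alpha=*/1.0f, /*beta=*/1.0f);
//   Epilogue op(params);
//   cutlass::half_t d = op(/*accumulator=*/2.0f, /*source0=*/cutlass::half_t(3.0f),
//                          /*source1=*/cutlass::half_t(0.0f), /*bias=*/0.5f);
//   // d == 1 * 2 + 0.5 + 1 * 3 == 5.5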
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/thread/linear_combination_tensor_broadcast.hpp/0 | {
"file_path": "include/cutlass/epilogue/thread/linear_combination_tensor_broadcast.hpp",
"repo_id": "include",
"token_count": 3027
} | 25 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#include <cuda/std/utility>
#else
#include <assert.h>
#include <utility>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/numeric_types.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This base class is meant to define the concept required of the
/// EpilogueWithBroadcast::OutputOp
template <
typename ElementC_,
typename ElementAccumulator_,
typename ElementCompute_,
typename ElementZ_,
typename ElementT_,
int ElementsPerAccess,
bool StoreZ = true,
bool StoreT = true
>
struct EpilogueWithBroadcastOpBase {
using ElementOutput = ElementC_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using ElementZ = ElementZ_;
using ElementT = ElementT_;
static int const kElementsPerAccess = ElementsPerAccess;
using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentCompute = Array<ElementCompute, kElementsPerAccess>;
using FragmentC = Array<ElementOutput, kElementsPerAccess>;
using FragmentZ = Array<ElementZ, kElementsPerAccess>;
using FragmentT = Array<ElementT, kElementsPerAccess>;
/// If true, the 'Z' tensor is stored
static bool const kStoreZ = StoreZ;
/// If true, the 'T' tensor is stored
static bool const kStoreT = StoreT;
/// Parameters structure - required
struct Params { };
//
// Methods
//
/// Constructor from Params
EpilogueWithBroadcastOpBase(Params const ¶ms_) { }
  /// Determine if the source is needed. May return false if the source tensor does not contribute to the output.
bool is_source_needed() const {
return true;
}
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) { }
/// Applies the operation when is_source_needed() is true
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentC const &frag_C1,
FragmentC const &frag_C2,
FragmentCompute const &V) const {
}
/// Applies the operation when is_source_needed() is false
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentCompute const &V) const {
}
};
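//
// For illustration only: a minimal sketch of a functor conforming to this concept, kept
// inside a comment so this header stays compilable. The functor name and its fused
// "accumulator + sources + broadcast, clamped at zero" math are hypothetical examples,
// not operators provided by CUTLASS.
//
//   struct HypotheticalBroadcastOp : EpilogueWithBroadcastOpBase<
//       float, float, float, float, float, 4> {
//
//     // Required by EpilogueWithBroadcast to select the two-source specialization below
//     static bool const kIsSingleSource = false;
//
//     CUTLASS_HOST_DEVICE
//     void operator()(FragmentZ &frag_Z, FragmentT &frag_T,
//                     FragmentAccumulator const &AB,
//                     FragmentC const &frag_C1, FragmentC const &frag_C2,
//                     FragmentCompute const &V) const {
//       CUTLASS_PRAGMA_UNROLL
//       for (int i = 0; i < kElementsPerAccess; ++i) {
//         float z = AB[i] + frag_C1[i] + frag_C2[i] + V[i];  // example math only
//         frag_T[i] = z;                                      // auxiliary output 'T'
//         frag_Z[i] = z < 0.f ? 0.f : z;                      // main output 'Z'
//       }
//     }
//   };
//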
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator with bias vector broadcast over columns.
///
/// Computes the following:
///
///
/// Z, T = OutputOp(AB, C, Broadcast)
///
///   if (OutputOp::kStoreZ) {
///     store(Z);
///   }
///
///   if (OutputOp::kStoreT) {
///     store(T);
///   }
///
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors (z)
typename TensorTileIterator_, ///< Additional tile iterator for tensor-valued operands (t)
typename ElementVector_, ///< Pointer to broadcast vector
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename OutputOp_, ///< Output operator - concept is EpilogueWithBroadcastOp
typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
int FragmentsPerPartition = 1, ///< Used to coarsen the epilogue granularity
int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large
(!IsEpilogueFunctorHeavy<OutputOp_>::value),
bool IsSingleSource = OutputOp_::kIsSingleSource
>
class EpilogueWithBroadcast;
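//
// In spirit, both partial specializations below evaluate the following for every output
// vector of kElementsPerAccess elements (a summary of the code that follows, not new API):
//
//   frag_Z, frag_T = output_op(accumulator_fragment, source_fragment(s), broadcast_fragment);
//   if (OutputOp::kStoreZ) { destination_iterator.store(frag_Z); }
//   if (OutputOp::kStoreT) { tensor_iterator.store(frag_T); }
//
// The broadcast fragment holds one bias value per output column and is re-used for every row.
// OutputOp::kIsSingleSource selects the specialization: 'false' consumes two source tiles
// (C1, C2), 'true' consumes a single source tile (C).
//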
template <
typename Shape_,
typename WarpMmaOperator_,
int PartitionsK,
typename OutputTileIterator_,
typename TensorTileIterator_,
typename ElementVector_,
typename AccumulatorFragmentIterator_,
typename WarpTileIterator_,
typename SharedLoadIterator_,
typename OutputOp_,
typename Padding_,
int FragmentsPerPartition,
int IterationsUnroll
>
class EpilogueWithBroadcast<
Shape_,
WarpMmaOperator_,
PartitionsK,
OutputTileIterator_,
TensorTileIterator_,
ElementVector_,
AccumulatorFragmentIterator_,
WarpTileIterator_,
SharedLoadIterator_,
OutputOp_,
Padding_,
FragmentsPerPartition,
IterationsUnroll,
false
> :
public EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition> {
public:
using Base = EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition>;
static bool const kIsSingleSource = false;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using TensorTileIterator = TensorTileIterator_;
using ElementVector = ElementVector_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp = OutputOp_;
using Padding = Padding_;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename Base::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Compute data type produced by the output op
using ElementCompute = typename OutputOp::ElementCompute;
/// Compute fragment
using FragmentCompute = Array<ElementCompute, OutputTileIterator::Fragment::kElements>;
/// Thread map used by output tile iterators
using ThreadMap = typename OutputTileIterator::ThreadMap;
/// Fragment object used to store the broadcast values
using BroadcastFragment = Array<
ElementCompute,
ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess>;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Data type of additional tensor
using ElementTensor = typename TensorTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using ComputeAccessType = Array<ElementCompute, OutputTileIterator::kElementsPerAccess>;
/// Tensor access type
using TensorAccessType = Array<ElementTensor, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount = typename Base::WarpCount;
/// Shared memory allocation from epilogue base class
using BaseSharedStorage = typename Base::SharedStorage;
static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles;
/// Used for the broadcast
struct BroadcastDetail {
/// Number of threads per warp
static int const kWarpSize = 32;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
/// Number of distinct scalar column indices handled by each thread
static int const kColumnsPerThread = ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess;
/// Number of distinct scalar row indices handled by each thread
static int const kRowsPerThread = ThreadMap::Iterations::kCount / ThreadMap::Iterations::kColumn;
/// Number of threads per threadblock
static int const kThreadCount = kWarpSize * WarpCount::kCount;
/// Number of distinct threads per row of output tile
static int const kThreadsPerRow = (Shape::kN / kColumnsPerThread);
/// Number of distinct threads which must be reduced during the final reduction phase within the threadblock.
static int const kThreadRows = kThreadCount / kThreadsPerRow;
/// Upper bound on the number of scalar accesses each thread makes per row when the whole threadblock covers the tile width (at least one)
static int const kThreadAccessesPerRow = const_max(1, (Shape::kN + kThreadCount - 1) / kThreadCount);
/// Shape of the shared memory allocation for the epilogue
using StorageShape = MatrixShape<
kThreadRows,
Shape::kN
>;
/// Debug printing
CUTLASS_DEVICE
static void print() {
#if 0
printf("BroadcastDetail {\n");
printf(
" kColumnsPerThread: %d\nkRowsPerThread: %d\n,kThreadCount: %d\nkThreadsPerRow: %d\n"
"kThreadRows: %d\nThreadAccessesPerRow: %d\nStorageShape: %d x %d (count: %d)\n",
kColumnsPerThread,
kRowsPerThread,
kThreadCount,
kThreadsPerRow,
kThreadRows,
kThreadAccessesPerRow,
StorageShape::kRow,
StorageShape::kColumn,
StorageShape::kCount
);
printf("};\n");
#endif
}
};
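//
// Worked example of the constants above (hypothetical numbers chosen only to illustrate the
// arithmetic, not a specific CUTLASS configuration): with Shape::kN = 128,
// ThreadMap::kElementsPerAccess = 4, ThreadMap::Iterations::kColumn = 1,
// ThreadMap::Iterations::kCount = 8 and WarpCount::kCount = 4 (128 threads):
//
//   kColumnsPerThread     = 1 * 4             = 4
//   kRowsPerThread        = 8 / 1             = 8
//   kThreadCount          = 32 * 4            = 128
//   kThreadsPerRow        = 128 / 4           = 32
//   kThreadRows           = 128 / 32          = 4
//   kThreadAccessesPerRow = max(1, 128 / 128) = 1
//   StorageShape          = MatrixShape<4, 128>
//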
/// Shared storage structure (shadows base)
struct SharedStorage {
union {
BaseSharedStorage base;
};
CUTLASS_HOST_DEVICE
SharedStorage() { }
};
public:
static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements,
"Mismatch between shared load iterator and output tile iterator.");
static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
"Divisibility");
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
/// Thread index within the threadblock
int thread_idx_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueWithBroadcast(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
Base(shared_storage.base, thread_idx, warp_idx, lane_idx),
shared_load_iterator_(shared_storage.base.reference(), thread_idx),
thread_idx_(thread_idx)
{
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
ElementVector const * broadcast_ptr, ///< Broadcast vector
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator1, ///< Tile iterator for first source accumulator matrix
OutputTileIterator source_iterator2, ///< Tile iterator for second source accumulator matrix
TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord(Shape::kM, Shape::kN),
MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space
MatrixCoord()) {
BroadcastFragment broadcast_fragment;
load_broadcast_fragment_(broadcast_fragment, broadcast_ptr, problem_size, threadblock_offset);
if (!output_op.is_source_needed()) {
compute_source_not_needed_(
output_op,
broadcast_fragment,
destination_iterator,
accumulators,
tensor_iterator);
}
else {
compute_source_needed_(
output_op,
broadcast_fragment,
destination_iterator,
accumulators,
source_iterator1,
source_iterator2,
tensor_iterator);
}
}
private:
CUTLASS_DEVICE
void load_broadcast_fragment_(
BroadcastFragment & broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
ElementVector const * broadcast_ptr, ///< Broadcast vector
MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord const &threadblock_offset ///< Threadblock's initial offset within the problem size space
) {
broadcast_fragment.clear();
// If no pointer is supplied, set with all zeros and avoid memory accesses
if (!broadcast_ptr) {
return;
}
int thread_initial_column = ThreadMap::initial_offset(thread_idx_).column();
int thread_column_idx = threadblock_offset.column() + thread_initial_column;
broadcast_ptr += thread_initial_column;
NumericArrayConverter<ElementCompute, ElementVector, BroadcastDetail::kElementsPerAccess> converter;
using AccessType = AlignedArray<ElementVector, BroadcastDetail::kElementsPerAccess>;
using ComputeFragmentType = Array<ElementCompute, BroadcastDetail::kElementsPerAccess>;
ComputeFragmentType *frag_ptr = reinterpret_cast<ComputeFragmentType *>(&broadcast_fragment);
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < ThreadMap::Iterations::kColumn; ++j) {
AccessType loaded;
loaded.clear();
if (thread_column_idx < problem_size.column()) {
loaded = *reinterpret_cast<AccessType const *>(broadcast_ptr);
}
ComputeFragmentType cvt = converter(loaded);
frag_ptr[j] = cvt;
thread_column_idx += ThreadMap::Delta::kColumn;
broadcast_ptr += ThreadMap::Delta::kColumn;
}
}
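//
// Access pattern of the loop above, for a hypothetical thread whose initial column is 8 with
// ThreadMap::Delta::kColumn = 64, kElementsPerAccess = 4 and Iterations::kColumn = 2: the
// thread loads broadcast_ptr[8..11] and broadcast_ptr[72..75], converts each vector to
// ElementCompute, and leaves zeros for any access whose global starting column falls outside
// problem_size.column().
//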
template <class Seq>
struct acc2smem_source_not_needed;
template <size_t... Seq>
struct acc2smem_source_not_needed<cutlass::index_sequence<Seq...>> {
template <int Advance>
CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
warp_tile_iterator.store(accum_fragment);
if (p < Base::kFragmentsPerIteration - 1) {
warp_tile_iterator.add_pointer_offset(kSmemPointerOffset);
}
}
if (Base::kFragmentsPerIteration > 1) {
warp_tile_iterator.add_pointer_offset(kSmemPointerOffset *
(1 - Base::kFragmentsPerIteration));
}
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {
(pos == (Seq * Base::kFragmentsPerIteration)) &&
(helper<Seq * Base::kFragmentsPerIteration>(iterator_begin, warp_tile_iterator), 0)...};
CUTLASS_UNUSED(dummy[0]);
}
};
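//
// The 'push' above relies on a pack-expansion dispatch: one '(pos == K) && (helper<K>(...), 0)'
// term is expanded per compile-time index K, so exactly one helper<K> runs for the runtime
// 'pos' and each instantiation gets a fully unrolled advance loop. A minimal standalone
// sketch of the same idiom (do_work<K>() is a hypothetical placeholder):
//
//   template <size_t... Seq>
//   CUTLASS_DEVICE void dispatch(size_t pos, cutlass::index_sequence<Seq...>) {
//     int dummy[] = { (pos == Seq && (do_work<Seq>(), 0))... };
//     CUTLASS_UNUSED(dummy[0]);
//   }
//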
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_not_needed_(
OutputOp const &output_op, ///< Output operator
BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
TensorTileIterator tensor_iterator ///< Threadblock tile iterator for additional tensor operand
) {
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
// CUTLASS_PRAGMA_UNROLL
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations / Base::kFragmentsPerIteration : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; iter += Base::kFragmentsPerIteration) {
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_not_needed<
cutlass::make_index_sequence<OutputTileIterator::kIterations /
Base::kFragmentsPerIteration>>::push(iter,
accum_fragment_iterator,
this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
if (p < Base::kFragmentsPerIteration - 1) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
}
else if (kPartitionsK > 1) {
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
}
//
// Apply output operation
//
typename OutputTileIterator::Fragment frag_Z;
typename TensorTileIterator::Fragment frag_T;
apply_output_operator_source_not_needed_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
broadcast_fragment);
//
// Conditionally store fragments
//
if (OutputOp::kStoreZ) {
destination_iterator.store(frag_Z);
++destination_iterator;
}
if (OutputOp::kStoreT) {
tensor_iterator.store(frag_T);
++tensor_iterator;
}
}
if (Base::kFragmentsPerIteration > 1) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset * (1 - Base::kFragmentsPerIteration));
}
}
}
template<class Seq>
struct acc2smem_source_needed;
template <size_t... Seq>
struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> {
template<int Advance>
CUTLASS_DEVICE
static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
warp_tile_iterator.store(accum_fragment);
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
CUTLASS_UNUSED(dummy[0]);
}
};
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp const &output_op, ///< Output operator
BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator1, ///< Tile iterator for first source accumulator matrix
OutputTileIterator source_iterator2, ///< Tile iterator for second source accumulator matrix
TensorTileIterator tensor_iterator ///< Threadblock tile iterator for additional tensor operand
) {
typename OutputTileIterator::Fragment source_fragment1;
source_fragment1.clear();
typename OutputTileIterator::Fragment source_fragment2;
source_fragment2.clear();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Load the source
//
source_iterator1.load(source_fragment1);
++source_iterator1;
source_iterator2.load(source_fragment2);
++source_iterator2;
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
iter, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_tile_offset({tile_row_offset , 0});
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0});
}
//
// Apply output operation
//
typename OutputTileIterator::Fragment frag_Z;
typename TensorTileIterator::Fragment frag_T;
apply_output_operator_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
source_fragment1,
source_fragment2,
broadcast_fragment);
//
// Conditionally store fragments
//
if (OutputOp::kStoreZ) {
destination_iterator.store(frag_Z);
++destination_iterator;
}
if (OutputOp::kStoreT) {
tensor_iterator.store(frag_T);
++tensor_iterator;
}
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_(
typename OutputTileIterator::Fragment &frag_Z,
typename TensorTileIterator::Fragment &frag_T,
OutputOp const &output_op,
typename SharedLoadIterator::Fragment const &frag_AB,
typename OutputTileIterator::Fragment const &frag_C1,
typename OutputTileIterator::Fragment const &frag_C2,
BroadcastFragment const &frag_Broadcast) {
using AccessTypeZ = Array<typename OutputTileIterator::Element, kElementsPerAccess>;
using AccessTypeT = Array<typename TensorTileIterator::Element, kElementsPerAccess>;
using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>;
AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z);
AccessTypeT *frag_T_ptr = reinterpret_cast<AccessTypeT *>(&frag_T);
AccumulatorAccessType const *frag_AB_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&frag_AB);
OutputAccessType const *frag_C1_ptr =
reinterpret_cast<OutputAccessType const *>(&frag_C1);
OutputAccessType const *frag_C2_ptr =
reinterpret_cast<OutputAccessType const *>(&frag_C2);
AccessTypeBroadcast const *frag_Broadcast_ptr =
reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
output_op(
frag_Z_ptr[i],
frag_T_ptr[i],
frag_AB_ptr[i],
frag_C1_ptr[i],
frag_C2_ptr[i],
frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]);
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_source_not_needed_(
typename OutputTileIterator::Fragment &frag_Z,
typename TensorTileIterator::Fragment &frag_T,
OutputOp const &output_op,
typename SharedLoadIterator::Fragment const &frag_AB,
BroadcastFragment const &frag_Broadcast) {
using AccessTypeZ = Array<typename OutputTileIterator::Element, kElementsPerAccess>;
using AccessTypeT = Array<typename TensorTileIterator::Element, kElementsPerAccess>;
using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>;
AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z);
AccessTypeT *frag_T_ptr = reinterpret_cast<AccessTypeT *>(&frag_T);
AccumulatorAccessType const *frag_AB_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&frag_AB);
AccessTypeBroadcast const *frag_Broadcast_ptr =
reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
output_op(
frag_Z_ptr[i],
frag_T_ptr[i],
frag_AB_ptr[i],
frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]);
}
}
public:
/// Stream-K reduce helper
CUTLASS_DEVICE
void reduce(
int reduce_fragment_idx, ///< Reduce fragment index
OutputOp const &output_op, ///< Output operator
ElementVector const * broadcast_ptr, ///< Broadcast vector
OutputTileIterator destination_iterator, ///< Tile iterator for destination
OutputTileIterator source_iterator1, ///< Tile iterator for first source accumulator matrix
OutputTileIterator source_iterator2, ///< Tile iterator for second source accumulator matrix
TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord(Shape::kM, Shape::kN),
MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space
MatrixCoord())
{
BroadcastFragment broadcast_fragment;
load_broadcast_fragment_(broadcast_fragment, broadcast_ptr, problem_size, threadblock_offset);
// Initialize/load source-fragment data
typename OutputTileIterator::Fragment source_fragment1;
source_fragment1.clear();
typename OutputTileIterator::Fragment source_fragment2;
source_fragment2.clear();
if (output_op.is_source_needed())
{
source_iterator1 += reduce_fragment_idx;
source_iterator1.load(source_fragment1);
source_iterator2 += reduce_fragment_idx;
source_iterator2.load(source_fragment2);
}
// Load fragment from shared memory
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// Add fragments shared by other k partitions
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
}
//
// Apply output operation
//
typename OutputTileIterator::Fragment frag_Z;
typename TensorTileIterator::Fragment frag_T;
if (!output_op.is_source_needed()) {
apply_output_operator_source_not_needed_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
broadcast_fragment);
} else {
apply_output_operator_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
source_fragment1,
source_fragment2,
broadcast_fragment);
}
//
// Conditionally store fragments
//
if (OutputOp::kStoreZ) {
destination_iterator += reduce_fragment_idx;
destination_iterator.store(frag_Z);
}
if (OutputOp::kStoreT) {
tensor_iterator += reduce_fragment_idx;
tensor_iterator.store(frag_T);
}
}
};
template <
typename Shape_,
typename WarpMmaOperator_,
int PartitionsK,
typename OutputTileIterator_,
typename TensorTileIterator_,
typename ElementVector_,
typename AccumulatorFragmentIterator_,
typename WarpTileIterator_,
typename SharedLoadIterator_,
typename OutputOp_,
typename Padding_,
int FragmentsPerPartition,
int IterationsUnroll
>
class EpilogueWithBroadcast<
Shape_,
WarpMmaOperator_,
PartitionsK,
OutputTileIterator_,
TensorTileIterator_,
ElementVector_,
AccumulatorFragmentIterator_,
WarpTileIterator_,
SharedLoadIterator_,
OutputOp_,
Padding_,
FragmentsPerPartition,
IterationsUnroll,
true
> :
public EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition> {
public:
using Base = EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition>;
static bool const kIsSingleSource = true;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using TensorTileIterator = TensorTileIterator_;
using ElementVector = ElementVector_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp = OutputOp_;
using Padding = Padding_;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename Base::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Compute data type produced by the output op
using ElementCompute = typename OutputOp::ElementCompute;
/// Compute fragment
using FragmentCompute = Array<ElementCompute, OutputTileIterator::Fragment::kElements>;
/// Thread map used by output tile iterators
using ThreadMap = typename OutputTileIterator::ThreadMap;
/// Fragment object used to store the broadcast values
using BroadcastFragment = Array<
ElementCompute,
ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess>;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Data type of additional tensor
using ElementTensor = typename TensorTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using ComputeAccessType = Array<ElementCompute, OutputTileIterator::kElementsPerAccess>;
/// Tensor access type
using TensorAccessType = Array<ElementTensor, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount = typename Base::WarpCount;
/// Shared memory allocation from epilogue base class
using BaseSharedStorage = typename Base::SharedStorage;
static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles;
/// Used for the broadcast
struct BroadcastDetail {
/// Number of threads per warp
static int const kWarpSize = 32;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
/// Number of distinct scalar column indices handled by each thread
static int const kColumnsPerThread = ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess;
/// Number of distinct scalar row indices handled by each thread
static int const kRowsPerThread = ThreadMap::Iterations::kCount / ThreadMap::Iterations::kColumn;
/// Number of threads per threadblock
static int const kThreadCount = kWarpSize * WarpCount::kCount;
/// Number of distinct threads per row of output tile
static int const kThreadsPerRow = (Shape::kN / kColumnsPerThread);
/// Number of distinct threads which must be reduced during the final reduction phase within the threadblock.
static int const kThreadRows = kThreadCount / kThreadsPerRow;
/// Upper bound on the number of scalar accesses each thread makes per row when the whole threadblock covers the tile width (at least one)
static int const kThreadAccessesPerRow = const_max(1, (Shape::kN + kThreadCount - 1) / kThreadCount);
/// Shape of the shared memory allocation for the epilogue
using StorageShape = MatrixShape<
kThreadRows,
Shape::kN
>;
/// Debug printing
CUTLASS_DEVICE
static void print() {
#if 0
printf("BroadcastDetail {\n");
printf(
" kColumnsPerThread: %d\nkRowsPerThread: %d\n,kThreadCount: %d\nkThreadsPerRow: %d\n"
"kThreadRows: %d\nThreadAccessesPerRow: %d\nStorageShape: %d x %d (count: %d)\n",
kColumnsPerThread,
kRowsPerThread,
kThreadCount,
kThreadsPerRow,
kThreadRows,
kThreadAccessesPerRow,
StorageShape::kRow,
StorageShape::kColumn,
StorageShape::kCount
);
printf("};\n");
#endif
}
};
/// Shared storage structure (shadows base)
struct SharedStorage {
union {
BaseSharedStorage base;
};
CUTLASS_HOST_DEVICE
SharedStorage() { }
};
public:
static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements,
"Mismatch between shared load iterator and output tile iterator.");
static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
"Divisibility");
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
/// Thread index within the threadblock
int thread_idx_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueWithBroadcast(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
Base(shared_storage.base, thread_idx, warp_idx, lane_idx),
shared_load_iterator_(shared_storage.base.reference(), thread_idx),
thread_idx_(thread_idx)
{
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
ElementVector const * broadcast_ptr, ///< Broadcast vector
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix
TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord(Shape::kM, Shape::kN),
MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space
MatrixCoord()) {
BroadcastFragment broadcast_fragment;
load_broadcast_fragment_(broadcast_fragment, broadcast_ptr, problem_size, threadblock_offset);
if (!output_op.is_source_needed()) {
compute_source_not_needed_(
output_op,
broadcast_fragment,
destination_iterator,
accumulators,
tensor_iterator);
}
else {
compute_source_needed_(
output_op,
broadcast_fragment,
destination_iterator,
accumulators,
source_iterator,
tensor_iterator);
}
}
private:
CUTLASS_DEVICE
void load_broadcast_fragment_(
BroadcastFragment & broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
ElementVector const * broadcast_ptr, ///< Broadcast vector
MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord const &threadblock_offset ///< Threadblock's initial offset within the problem size space
) {
broadcast_fragment.clear();
// If no pointer is supplied, set with all zeros and avoid memory accesses
if (!broadcast_ptr) {
return;
}
int thread_initial_column = ThreadMap::initial_offset(thread_idx_).column();
int thread_column_idx = threadblock_offset.column() + thread_initial_column;
broadcast_ptr += thread_initial_column;
NumericArrayConverter<ElementCompute, ElementVector, BroadcastDetail::kElementsPerAccess> converter;
using AccessType = AlignedArray<ElementVector, BroadcastDetail::kElementsPerAccess>;
using ComputeFragmentType = Array<ElementCompute, BroadcastDetail::kElementsPerAccess>;
ComputeFragmentType *frag_ptr = reinterpret_cast<ComputeFragmentType *>(&broadcast_fragment);
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < ThreadMap::Iterations::kColumn; ++j) {
AccessType loaded;
loaded.clear();
if (thread_column_idx < problem_size.column()) {
loaded = *reinterpret_cast<AccessType const *>(broadcast_ptr);
}
ComputeFragmentType cvt = converter(loaded);
frag_ptr[j] = cvt;
thread_column_idx += ThreadMap::Delta::kColumn;
broadcast_ptr += ThreadMap::Delta::kColumn;
}
}
template <class Seq>
struct acc2smem_source_not_needed;
template <size_t... Seq>
struct acc2smem_source_not_needed<cutlass::index_sequence<Seq...>> {
template <int Advance>
CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
warp_tile_iterator.store(accum_fragment);
if (p < Base::kFragmentsPerIteration - 1) {
warp_tile_iterator.add_pointer_offset(kSmemPointerOffset);
}
}
if (Base::kFragmentsPerIteration > 1) {
warp_tile_iterator.add_pointer_offset(kSmemPointerOffset *
(1 - Base::kFragmentsPerIteration));
}
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {
(pos == (Seq * Base::kFragmentsPerIteration)) &&
(helper<Seq * Base::kFragmentsPerIteration>(iterator_begin, warp_tile_iterator), 0)...};
CUTLASS_UNUSED(dummy[0]);
}
};
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_not_needed_(
OutputOp const &output_op, ///< Output operator
BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
TensorTileIterator tensor_iterator ///< Threadblock tile iterator for additional tensor operand
) {
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
// CUTLASS_PRAGMA_UNROLL
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations / Base::kFragmentsPerIteration : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; iter += Base::kFragmentsPerIteration) {
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_not_needed<
cutlass::make_index_sequence<OutputTileIterator::kIterations /
Base::kFragmentsPerIteration>>::push(iter,
accum_fragment_iterator,
this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
if (p < Base::kFragmentsPerIteration - 1) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
}
else if (kPartitionsK > 1) {
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
}
//
// Apply output operation
//
typename OutputTileIterator::Fragment frag_Z;
typename TensorTileIterator::Fragment frag_T;
apply_output_operator_source_not_needed_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
broadcast_fragment);
//
// Conditionally store fragments
//
if (OutputOp::kStoreZ) {
destination_iterator.store(frag_Z);
++destination_iterator;
}
if (OutputOp::kStoreT) {
tensor_iterator.store(frag_T);
++tensor_iterator;
}
}
if (Base::kFragmentsPerIteration > 1) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset * (1 - Base::kFragmentsPerIteration));
}
}
}
template<class Seq>
struct acc2smem_source_needed;
template <size_t... Seq>
struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> {
template<int Advance>
CUTLASS_DEVICE
static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
warp_tile_iterator.store(accum_fragment);
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
CUTLASS_UNUSED(dummy[0]);
}
};
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp const &output_op, ///< Output operator
BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix
TensorTileIterator tensor_iterator ///< Threadblock tile iterator for additional tensor operand
) {
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Load the source
//
source_iterator.load(source_fragment);
++source_iterator;
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
iter, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_tile_offset({tile_row_offset , 0});
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0});
}
//
// Apply output operation
//
typename OutputTileIterator::Fragment frag_Z;
typename TensorTileIterator::Fragment frag_T;
apply_output_operator_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
source_fragment,
broadcast_fragment);
//
// Conditionally store fragments
//
if (OutputOp::kStoreZ) {
destination_iterator.store(frag_Z);
++destination_iterator;
}
if (OutputOp::kStoreT) {
tensor_iterator.store(frag_T);
++tensor_iterator;
}
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_(
typename OutputTileIterator::Fragment &frag_Z,
typename TensorTileIterator::Fragment &frag_T,
OutputOp const &output_op,
typename SharedLoadIterator::Fragment const &frag_AB,
typename OutputTileIterator::Fragment const &frag_C,
BroadcastFragment const &frag_Broadcast) {
using AccessTypeZ = Array<typename OutputTileIterator::Element, kElementsPerAccess>;
using AccessTypeT = Array<typename TensorTileIterator::Element, kElementsPerAccess>;
using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>;
AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z);
AccessTypeT *frag_T_ptr = reinterpret_cast<AccessTypeT *>(&frag_T);
AccumulatorAccessType const *frag_AB_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&frag_AB);
OutputAccessType const *frag_C_ptr =
reinterpret_cast<OutputAccessType const *>(&frag_C);
AccessTypeBroadcast const *frag_Broadcast_ptr =
reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
output_op(
frag_Z_ptr[i],
frag_T_ptr[i],
frag_AB_ptr[i],
frag_C_ptr[i],
frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]);
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_source_not_needed_(
typename OutputTileIterator::Fragment &frag_Z,
typename TensorTileIterator::Fragment &frag_T,
OutputOp const &output_op,
typename SharedLoadIterator::Fragment const &frag_AB,
BroadcastFragment const &frag_Broadcast) {
using AccessTypeZ = Array<typename OutputTileIterator::Element, kElementsPerAccess>;
using AccessTypeT = Array<typename TensorTileIterator::Element, kElementsPerAccess>;
using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>;
AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z);
AccessTypeT *frag_T_ptr = reinterpret_cast<AccessTypeT *>(&frag_T);
AccumulatorAccessType const *frag_AB_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&frag_AB);
AccessTypeBroadcast const *frag_Broadcast_ptr =
reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
output_op(
frag_Z_ptr[i],
frag_T_ptr[i],
frag_AB_ptr[i],
frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]);
}
}
public:
/// Stream-K reduce helper
CUTLASS_DEVICE
void reduce(
int reduce_fragment_idx, ///< Reduce fragment index
OutputOp const &output_op, ///< Output operator
ElementVector const * broadcast_ptr, ///< Broadcast vector
OutputTileIterator destination_iterator, ///< Tile iterator for destination
OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix
TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord(Shape::kM, Shape::kN),
MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space
MatrixCoord())
{
BroadcastFragment broadcast_fragment;
load_broadcast_fragment_(broadcast_fragment, broadcast_ptr, problem_size, threadblock_offset);
// Initialize/load source-fragment data
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
if (output_op.is_source_needed())
{
source_iterator += reduce_fragment_idx;
source_iterator.load(source_fragment);
}
// Load fragment from shared memory
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// Add fragments shared by other k partitions
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
}
//
// Apply output operation
//
typename OutputTileIterator::Fragment frag_Z;
typename TensorTileIterator::Fragment frag_T;
if (!output_op.is_source_needed()) {
apply_output_operator_source_not_needed_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
broadcast_fragment);
} else {
apply_output_operator_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
source_fragment,
broadcast_fragment);
}
//
// Conditionally store fragments
//
if (OutputOp::kStoreZ) {
destination_iterator.store(frag_Z);
++destination_iterator;
}
if (OutputOp::kStoreT) {
tensor_iterator.store(frag_T);
++tensor_iterator;
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <
typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_, ///< Element data type
BlasMode BlasMode_ = BlasMode::kGemm ///< Tile Iterator for a Symmetric or Hermitian Kernel
>
class PredicatedTileIteratorBlas3 {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static BlasMode const kBlasMode = BlasMode_;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Count::kTile;
static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0");
static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0");
static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0");
static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0");
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
static_assert( AccessType::kElements == 1, "BLAS3 Epilogue must use AccessType::kElements as 1");
//
// Parameters struct
//
/// Uses a non-template class
struct Params : PredicatedTileIteratorParams {
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout):
PredicatedTileIteratorParams(
layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
make_OutputTileThreadMapDesc<ThreadMap>()
)
{
}
};
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kColumn;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
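//
// Each predicate guards one column iteration for the lifetime of the iterator. During every
// access it is combined with a row guard computed on the fly, i.e. (restating the code below,
// not new API):
//
//   bool guard = ((row_offset + thread_start_row_) < extent_row_) && mask_.predicates[column];
//
// so clear() suppresses every guarded global-memory access issued by load()/store().
//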
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
PredicatedTileIteratorParams params_;
/// Byte-level pointer
uint8_t *byte_pointer_;
/// Fill Mode for a tile on diagonal of a symmetric kernel
cutlass::FillMode fill_mode;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in rows
Index extent_row_;
/// A thread's starting row position (assuming steady-state predicates have been computed)
Index thread_start_row_;
/// Internal state counter
int state_[3];
/// Starting address of the matrix
size_t matrix_start_addr;
static_assert((kBlasMode == BlasMode::kSymmetric || kBlasMode == BlasMode::kHermitian),
"Unsupported blas3 mode.");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIteratorBlas3(
PredicatedTileIteratorParams const & params,
Element *pointer,
TensorCoord extent,
int thread_idx,
TensorCoord threadblock_offset
, cutlass::FillMode fill_mode
):
params_(params), fill_mode(fill_mode)
{
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
extent_row_ = extent.row();
thread_start_row_ = thread_offset.row();
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
mask_.predicates[c] = ((thread_offset.column()
+ ThreadMap::Delta::kColumn * c) < extent.column());
}
// Check Symmetric kernel modes (Lower and Upper - for diagonal CTAs, None for rest CTAs)
if ((kBlasMode == BlasMode::kSymmetric || kBlasMode == BlasMode::kHermitian) &&
fill_mode == cutlass::FillMode::kInvalid) {
arch::device_breakpoint();
}
// Starting address of the matrix
matrix_start_addr = reinterpret_cast<size_t>(pointer);
// Initialize pointer
byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) +
LongIndex(thread_offset.row()) * LongIndex(params_.stride) +
LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
// Initialize internal state counter
state_[0] = state_[1] = state_[2] = 0;
}
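//
// Worked example of the byte-level addressing set up above (hypothetical values): for
// Element = double with kElementsPerAccess = 1, a row stride of 1024 elements gives
// params_.stride = 1024 * 8 bytes; a thread whose initial offset is (row 3, column 17)
// therefore starts at
//
//   byte_pointer_ = pointer + 3 * 8192 + 17 * 8   // bytes
//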
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
column],
(void *)&memory_pointer[column * ThreadMap::Delta::kColumn /
kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Loads a fragment on the diagonal of a symmetric kernel to memory
CUTLASS_DEVICE
void load_symmetric_with_byte_offset(Fragment &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
bool isLowerMode = (fill_mode == cutlass::FillMode::kLower) ? true : false;
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
// Offset of row from beginning of the matrix per thread
size_t row_start_offset = (size_t)memory_pointer - matrix_start_addr;
// Absolute row index
int row_index = int(row_start_offset/params_.stride);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
// Offset of column from beginning of row per thread
size_t col_start_offset = row_start_offset +
(column * ThreadMap::Delta::kColumn / kElementsPerAccess) * sizeof(AccessType);
// Absolute column index
size_t col_index = (col_start_offset%params_.stride)/sizeof(AccessType);
guard = guard && ( (isLowerMode && row_index >= col_index) ||
(!isLowerMode && row_index <= col_index) );
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
column],
(void *)&memory_pointer[column * ThreadMap::Delta::kColumn /
kElementsPerAccess],
guard);
// For Hermitian mode, the imaginary parts of diagonal elements are assumed to be zero and are explicitly zeroed here
if (guard && kBlasMode == BlasMode::kHermitian && cutlass::is_complex<Element>::value) {
Element *scalar_ptr = reinterpret_cast<Element *>(frag_ptr);
if (row_index == col_index) {
scalar_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column] =
real(scalar_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column]);
}
}
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
if (fill_mode == cutlass::FillMode::kNone) {
load_with_byte_offset(frag, 0);
}
else {
load_symmetric_with_byte_offset(frag, 0);
}
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void *)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
  /// Stores a fragment to memory, honoring the triangular fill mode of a symmetric / Hermitian (BLAS3) output
CUTLASS_DEVICE
void store_symmetric_with_byte_offset(Fragment const &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
    bool isLowerMode = (fill_mode == cutlass::FillMode::kLower);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
// Offset of row from beginning of the matrix per thread
size_t row_start_offset = (size_t)memory_pointer - matrix_start_addr;
// Absolute row index
int row_index = int(row_start_offset/params_.stride);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
// Offset of column from beginning of row per thread
size_t col_start_offset = row_start_offset +
(column * ThreadMap::Delta::kColumn / kElementsPerAccess) * sizeof(AccessType);
// Absolute column index
size_t col_index = (col_start_offset%params_.stride)/sizeof(AccessType);
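          // Predicate off accesses outside the stored triangle: lower fill keeps row >= col,
          // upper fill keeps row <= col.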
guard = guard && ( (isLowerMode && row_index >= col_index) ||
(!isLowerMode && row_index <= col_index) );
          // For Hermitian problems, the imaginary parts of diagonal elements are assumed to be zero, so only the real part is stored
if (guard && kBlasMode == BlasMode::kHermitian && cutlass::is_complex<Element>::value) {
AccessType *frag_ptr_modify = const_cast<AccessType *>(frag_ptr);
Element *scalar_ptr = reinterpret_cast<Element *>(frag_ptr_modify);
if (row_index == col_index) {
scalar_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column] =
real(scalar_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column]);
}
}
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
column],
(void *)&memory_pointer[column * ThreadMap::Delta::kColumn /
kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
if (fill_mode == cutlass::FillMode::kNone) {
store_with_byte_offset(frag, 0);
}
else {
store_symmetric_with_byte_offset(frag, 0);
}
}
/// Advances to the next position to load or store
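  /// state_[0], state_[1], and state_[2] count row, group, and cluster steps respectively;
  /// advance_row is applied on every increment, while advance_group, advance_cluster, and
  /// advance_tile are applied as each counter wraps around.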
CUTLASS_HOST_DEVICE
PredicatedTileIteratorBlas3 &operator++() {
++state_[0];
byte_pointer_ += params_.advance_row;
thread_start_row_ += ThreadMap::Shape::kRow;
if (state_[0] == ThreadMap::Count::kRow) {
state_[0] = 0;
++state_[1];
byte_pointer_ += params_.advance_group;
thread_start_row_ += (ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
if (state_[1] == ThreadMap::Count::kGroup) {
state_[1] = 0;
++state_[2];
byte_pointer_ += params_.advance_cluster;
thread_start_row_ += ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
if (state_[2] == ThreadMap::Count::kCluster) {
state_[2] = 0;
byte_pointer_ += params_.advance_tile;
}
}
}
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
///< Sets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
};
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/predicated_tile_iterator_blas3.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/predicated_tile_iterator_blas3.h",
"repo_id": "include",
"token_count": 8475
} | 27 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic structures needed for implementing the warp-scoped phase of the epilogue.
These quantities assume a 'column-major' arrangement of TensorOp instructions, of which
a row-oriented slice is visible per iteration.
*/
#pragma once
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
/// Policy details related to the epilogue
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
  typename OperatorShape,      ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename Layout ///< target shared memory layout
>
struct TensorOpPolicy;
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for row-major
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename OperatorShape ///< matrix multiply operation shape (concept: gemm::GemmShape)
>
struct TensorOpPolicy<WarpShape, OperatorShape, layout::RowMajor> {
/// Number of operations
using OperatorCount = MatrixShape<
(WarpShape::kM + OperatorShape::kM - 1) / OperatorShape::kM,
(WarpShape::kN + OperatorShape::kN - 1) / OperatorShape::kN
>;
//
// Hard-coded constants regarding Tensor Operations
//
static int const kElementsPerAccess = 2;
static int const kRowsPerIteration = 8;
static bool const kDivisible =
!(WarpShape::kM % OperatorShape::kM) && !(WarpShape::kN % OperatorShape::kN);
//
// Derived quantities
//
// Number of 'externally visible' iterations per actual instruction
static int const kIterationsPerInstruction = OperatorShape::kM / kRowsPerIteration;
// Number of externally visible iterations
static int const kIterations = OperatorCount::kRow * kIterationsPerInstruction;
using TileIterations = MatrixShape<kIterations, 1>;
static int const kAccumulatorRowStride = kElementsPerAccess;
static int const kAccumulatorColumnStride = kElementsPerAccess * OperatorCount::kRow * kIterationsPerInstruction;
};
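// Worked example (illustrative numbers only): for WarpShape = 64x64 and OperatorShape = 16x8x16
// (the m16n8k16 TensorOp), OperatorCount is 4x8, kIterationsPerInstruction = 16 / 8 = 2,
// kIterations = 4 * 2 = 8, kAccumulatorRowStride = 2, and kAccumulatorColumnStride = 2 * 4 * 2 = 16.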
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for column-major
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename OperatorShape ///< matrix multiply operation shape (concept: gemm::GemmShape)
>
struct TensorOpPolicy<WarpShape, OperatorShape, layout::ColumnMajor> {
/// Number of operations
using OperatorCount = MatrixShape<
(WarpShape::kM + OperatorShape::kM - 1) / OperatorShape::kM,
(WarpShape::kN + OperatorShape::kN - 1) / OperatorShape::kN
>;
//
// Hard-coded constants regarding Tensor Operations
//
static int const kElementsPerAccess = 1;
static int const kColumnsPerIteration = 8;
static bool const kDivisible =
!(WarpShape::kM % OperatorShape::kM) && !(WarpShape::kN % OperatorShape::kN);
//
// Derived quantities
//
// Number of 'externally visible' iterations per actual instruction
static int const kIterationsPerInstruction = OperatorShape::kN / kColumnsPerIteration;
// Number of externally visible iterations
static int const kIterations = OperatorCount::kColumn * kIterationsPerInstruction;
using TileIterations = MatrixShape<kIterations, 1>;
  // Hard-coded for the 16x8 TensorOp instruction shape
static int const kAccumulatorRowStride = 2;
static int const kAccumulatorColumnStride = 4 * OperatorCount::kRow;
};
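// Worked example (illustrative numbers only): for WarpShape = 64x64 and OperatorShape = 16x8x16,
// OperatorCount is 4x8, kIterationsPerInstruction = 8 / 8 = 1, kIterations = 8 * 1 = 8,
// and kAccumulatorColumnStride = 4 * 4 = 16.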
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for column-major-interleaved
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
  typename OperatorShape,      ///< matrix multiply operation shape (concept: gemm::GemmShape)
int InterleavedK ///< number of interleaved k
>
struct TensorOpPolicy<WarpShape, OperatorShape,
layout::ColumnMajorInterleaved<InterleavedK> > {
/// Number of operations
using OperatorCount = MatrixShape<WarpShape::kM / OperatorShape::kM,
WarpShape::kN / OperatorShape::kN>;
//
// Hard-coded constants regarding Tensor Operations
//
static int const kElementsPerAccess = 2;
static int const kRowsPerIteration = 8;
//
// Derived quantities
//
// Number of 'externally visible' iterations per actual instruction
static int const kIterationsPerInstruction =
OperatorShape::kM / kRowsPerIteration;
// Number of externally visible iterations
static int const kIterations = WarpShape::kN / InterleavedK *
OperatorCount::kRow *
kIterationsPerInstruction;
static int const kElementsPerIteration = InterleavedK / OperatorShape::kN * kElementsPerAccess;
static int const kAccessPerIteration = kElementsPerIteration / kElementsPerAccess;
// Number of externally visible iterations
//static int const kTileIterations = OperatorCount::kRow * kIterationsPerInstruction;
using TileIterations = MatrixShape<1, WarpShape::kN / InterleavedK>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/warp/tensor_op_policy.h/0 | {
"file_path": "include/cutlass/epilogue/warp/tensor_op_policy.h",
"repo_id": "include",
"token_count": 2091
} | 28 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a GEMM kernel that computes the absolute maximum of the output tensor
and applies additional scaling factors to operands.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/gemm_universal.h"
#include "cutlass/gemm/kernel/default_gemm_universal.h"
#include "cutlass/gemm/kernel/default_gemm_with_absmax.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/device/gemm_universal_base.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Universal GEMM with absolute-maximum calculation and scaling
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassTensorOp,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag_ = arch::Sm89,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = cutlass::epilogue::thread::LinearCombinationBiasElementwise<
ElementC_, ElementAccumulator_, ElementAccumulator_,
ElementC_, ElementC_, 128 / cutlass::sizeof_bits<ElementC_>::value>,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ = threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// Operation performed by GEMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB = ComplexTransform::kNone
>
class GemmUniversalWithAbsMax;
// Partial specialization for SM89
template <
typename ElementA_,
typename LayoutA_,
typename ElementB_,
typename LayoutB_,
typename ElementC_,
typename LayoutC_,
typename ElementAccumulator_,
typename ThreadblockShape_,
typename WarpShape_,
typename InstructionShape_,
typename EpilogueOutputOp_,
typename ThreadblockSwizzle_,
int Stages,
int AlignmentA,
int AlignmentB,
typename Operator_,
ComplexTransform TransformA,
ComplexTransform TransformB
>
class GemmUniversalWithAbsMax<
ElementA_,
LayoutA_,
ElementB_,
LayoutB_,
ElementC_,
LayoutC_,
ElementAccumulator_,
arch::OpClassTensorOp,
arch::Sm89,
ThreadblockShape_,
WarpShape_,
InstructionShape_,
EpilogueOutputOp_,
ThreadblockSwizzle_,
Stages,
AlignmentA,
AlignmentB,
Operator_,
TransformA,
TransformB
> :
public GemmUniversalBase<
typename kernel::DefaultGemmWithAbsMax<
ElementA_,
LayoutA_,
TransformA,
AlignmentA,
ElementB_,
LayoutB_,
TransformB,
AlignmentB,
ElementC_,
LayoutC_,
ElementAccumulator_,
arch::OpClassTensorOp,
arch::Sm89,
ThreadblockShape_,
WarpShape_,
InstructionShape_,
EpilogueOutputOp_,
ThreadblockSwizzle_,
Stages,
Operator_
>::GemmKernel
> {
public:
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = arch::OpClassTensorOp;
using ArchTag = arch::Sm89;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Base = GemmUniversalBase<
typename kernel::DefaultGemmWithAbsMax<
ElementA_,
LayoutA_,
TransformA,
AlignmentA,
ElementB_,
LayoutB_,
TransformB,
AlignmentB,
ElementC_,
LayoutC_,
ElementAccumulator_,
OperatorClass,
ArchTag,
ThreadblockShape_,
WarpShape_,
InstructionShape_,
EpilogueOutputOp_,
ThreadblockSwizzle_,
Stages,
Operator_
>::GemmKernel
>;
using Arguments = typename Base::Arguments;
using GemmKernel = typename Base::GemmKernel;
};
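// Minimal usage sketch (illustrative only; the element types, layouts, and default epilogue shown
// here are assumptions, not requirements -- callers typically supply an absolute-maximum-capable
// epilogue functor and tile shapes suited to their problem). The operator is configured entirely
// through its template parameters and then driven like any other device-level GEMM:
//
//   using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
//       cutlass::float_e4m3_t, cutlass::layout::RowMajor,     // A
//       cutlass::float_e4m3_t, cutlass::layout::ColumnMajor,  // B
//       cutlass::half_t,       cutlass::layout::RowMajor,     // C / D
//       float>;                                               // accumulator
//
//   Gemm gemm_op;
//   Gemm::Arguments args(/* mode, problem size, epilogue params, pointers, strides ... */);
//   cutlass::Status status = gemm_op(args);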
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for SM89 with column-major output: exchanges the problem size and operands.
template <
typename ElementA_,
typename LayoutA_,
typename ElementB_,
typename LayoutB_,
typename ElementC_,
typename ElementAccumulator_,
typename ThreadblockShape_,
typename WarpShape_,
typename InstructionShape_,
typename EpilogueOutputOp_,
typename ThreadblockSwizzle_,
int Stages,
int AlignmentA,
int AlignmentB,
typename Operator_,
ComplexTransform TransformA,
ComplexTransform TransformB>
class GemmUniversalWithAbsMax<ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_,
layout::ColumnMajor, // partially specialized on LayoutC
ElementAccumulator_, arch::OpClassTensorOp, arch::Sm89, ThreadblockShape_,
WarpShape_, InstructionShape_, EpilogueOutputOp_,
ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB,
Operator_, TransformA, TransformB> {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using ElementC = ElementC_;
using LayoutC = layout::ColumnMajor;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = arch::OpClassTensorOp;
using ArchTag = arch::Sm89;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using UnderlyingOperator = typename GemmUniversalWithAbsMax<
ElementB,
typename layout::LayoutTranspose<LayoutB>::type,
ElementA,
typename layout::LayoutTranspose<LayoutA>::type,
ElementC,
layout::RowMajor,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
kAlignmentB,
kAlignmentA,
Operator,
kTransformB,
kTransformA
>::Base;
using GemmKernel = typename UnderlyingOperator::GemmKernel;
static int const kAlignmentC = EpilogueOutputOp::kCount;
/// Argument structure
using Arguments = typename UnderlyingOperator::Arguments;
private:
UnderlyingOperator underlying_operator_;
public:
/// Constructs the GEMM.
GemmUniversalWithAbsMax() { }
  /// Helper to construct a transposed equivalent for the underlying GEMM operator
static Arguments to_underlying_arguments(Arguments const &args) {
return args.transposed_problem();
}
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
return UnderlyingOperator::can_implement(to_underlying_arguments(args));
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
}
/// Computes the grid shape
static dim3 get_grid_shape(Arguments const &args) {
return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args));
}
/// Computes the maximum number of active blocks per multiprocessor
static int maximum_active_blocks(int smem_capacity = -1) {
return UnderlyingOperator::maximum_active_blocks(smem_capacity);
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream);
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
return underlying_operator_.update(to_underlying_arguments(args), workspace);
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
return underlying_operator_.run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/device/gemm_universal_with_absmax.h/0 | {
"file_path": "include/cutlass/gemm/device/gemm_universal_with_absmax.h",
"repo_id": "include",
"token_count": 4561
} | 29 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for a pipelined GEMM kernel. Does not compute batching; split-K is supported
           only via serial reduction when SplitKSerial is enabled.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/semaphore.h"
#include "cutlass/arch/arch.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
bool SplitKSerial ///! If true, code supporting split-K via serial reduction is enabled.
>
struct Gemm {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static bool const kSplitKSerial = SplitKSerial;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorA::TensorRef ref_A;
typename Mma::IteratorB::Params params_B;
typename Mma::IteratorB::TensorRef ref_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::TensorRef ref_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::OutputTileIterator::TensorRef ref_D;
typename OutputOp::Params output_op;
int *semaphore;
int gemm_k_size;
// For gather+scatter operations
int const *gather_A_indices;
int const *gather_B_indices;
int const *scatter_D_indices;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params(): swizzle_log_tile(0), semaphore(0), gemm_k_size(0) { }
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
typename OutputOp::Params output_op = typename OutputOp::Params(),
int *workspace = nullptr,
int const *gather_A_indices = nullptr,
int const *gather_B_indices = nullptr,
int const *scatter_D_indices = nullptr
):
problem_size(problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(ref_A.layout()),
ref_A(ref_A),
params_B(ref_B.layout()),
ref_B(ref_B),
params_C(ref_C.layout()),
ref_C(ref_C),
params_D(ref_D.layout()),
ref_D(ref_D),
output_op(output_op),
gather_A_indices(gather_A_indices),
gather_B_indices(gather_B_indices),
scatter_D_indices(scatter_D_indices) {
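      // Partition the K dimension evenly across grid_tiled_shape.k() split-K slices, rounding the
      // per-slice extent up to a multiple of the threadblock K tile. Illustrative numbers: with
      // problem_size.k() = 1024, Mma::Shape::kK = 32, and grid_tiled_shape.k() = 2, this yields
      // total_gemm_k_iterations = 32, gemm_k_iterations = 16, and gemm_k_size = 512.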
int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();
gemm_k_size = gemm_k_iterations * Mma::Shape::kK;
semaphore = workspace;
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
Gemm() { }
/// Determines whether kernel satisfies alignment
CUTLASS_HOST_DEVICE
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D) {
static int const kAlignmentA = (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = (platform::is_same<typename Epilogue::OutputTileIterator::Layout,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Epilogue::OutputTileIterator::Layout,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Epilogue::OutputTileIterator::kElementsPerAccess;
if (!TensorRef_aligned(ref_A, kAlignmentA)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_B, kAlignmentB)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_C, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_D, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size,
};
cutlass::MatrixCoord tb_offset_B{
threadblock_tile_offset.k() * params.gemm_k_size,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Problem size is a function of threadblock index in the K dimension
int problem_size_k = min(
params.problem_size.k(),
(threadblock_tile_offset.k() + 1) * params.gemm_k_size);
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
params.ref_A.data(),
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A,
params.gather_A_indices);
typename Mma::IteratorB iterator_B(
params.params_B,
params.ref_B.data(),
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B,
params.gather_B_indices);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
if (!kSplitKSerial || gemm_k_iterations > 0) {
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);
}
//
// Epilogue
//
OutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
// If performing a reduction via split-K, fetch the initial synchronization
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
params.ref_C.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset,
params.scatter_D_indices
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ref_D.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset,
params.scatter_D_indices
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op, iterator_D, accumulators, iterator_C);
//
// Release the semaphore
//
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/kernel/gemm.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm.h",
"repo_id": "include",
"token_count": 5421
} | 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Defines the universal GEMM kernel, supporting standard, batched, array, and split-K
           (serial and parallel) modes.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/arch/arch.h"
#include "cutlass/fast_math.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
class GemmUniversal<
Mma_,
Epilogue_,
ThreadblockSwizzle_,
void,
// 3.x kernels use the first template argument to define the ProblemShape
// We use this invariant to SFINAE dispatch against either the 2.x API or the 3.x API
cute::enable_if_t<not (cute::is_tuple<Mma_>::value || IsCutlass3ArrayKernel<Mma_>::value)>
> {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value);
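  // For example, with 16-bit A and B operands this evaluates to max(128/16, 128/16) = 8 elements,
  // so each split-K slice begins at a 128-bit-aligned offset along K.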
//
// Structures
//
/// Argument structure
struct Arguments : UniversalArgumentsBase
{
//
// Data members
//
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void const * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
typename LayoutA::Stride stride_a;
typename LayoutB::Stride stride_b;
typename LayoutC::Stride stride_c;
typename LayoutC::Stride stride_d;
typename LayoutA::Stride::LongIndex lda;
typename LayoutB::Stride::LongIndex ldb;
typename LayoutC::Stride::LongIndex ldc;
typename LayoutC::Stride::LongIndex ldd;
int const * ptr_gather_A_indices;
int const * ptr_gather_B_indices;
int const * ptr_scatter_D_indices;
//
// Methods
//
Arguments():
ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr),
ptr_gather_A_indices(nullptr),
ptr_gather_B_indices(nullptr),
ptr_scatter_D_indices(nullptr)
{}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride stride_a,
typename LayoutB::Stride stride_b,
typename LayoutC::Stride stride_c,
typename LayoutC::Stride stride_d,
int const *ptr_gather_A_indices = nullptr,
int const *ptr_gather_B_indices = nullptr,
int const *ptr_scatter_D_indices = nullptr)
:
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C),
stride_a(stride_a), stride_b(stride_b), stride_c(stride_c), stride_d(stride_d),
ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices),
ptr_scatter_D_indices(ptr_scatter_D_indices)
{
lda = 0;
ldb = 0;
ldc = 0;
ldd = 0;
CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride::LongIndex lda,
typename LayoutB::Stride::LongIndex ldb,
typename LayoutC::Stride::LongIndex ldc,
typename LayoutC::Stride::LongIndex ldd,
int const *ptr_gather_A_indices = nullptr,
int const *ptr_gather_B_indices = nullptr,
int const *ptr_scatter_D_indices = nullptr
):
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd),
ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices),
ptr_scatter_D_indices(ptr_scatter_D_indices)
{
stride_a = make_Coord(lda);
stride_b = make_Coord(ldb);
stride_c = make_Coord(ldc);
stride_d = make_Coord(ldd);
CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const
{
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.stride_a, args.stride_b);
std::swap(args.batch_stride_A, args.batch_stride_B);
std::swap(args.ptr_gather_A_indices, args.ptr_gather_B_indices);
return args;
}
};
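  // Usage sketch (illustrative only; alpha/beta, pointers, and leading dimensions are placeholders
  // supplied by the caller). In kGemm mode, batch_count is interpreted as the number of split-K slices:
  //
  //   Arguments args(
  //     GemmUniversalMode::kGemm,
  //     {M, N, K},
  //     /*batch_count=*/1,
  //     {alpha, beta},
  //     ptr_A, ptr_B, ptr_C, ptr_D,
  //     /*batch_stride_A/B/C/D=*/0, 0, 0, 0,
  //     lda, ldb, ldc, ldd);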
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>;
//
// Data members
//
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename EpilogueOutputOp::Params output_op;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int * ptr_gather_A_indices;
int * ptr_gather_B_indices;
int * ptr_scatter_D_indices;
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a),
params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b),
params_C(args.ldc ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldc) : args.stride_c),
params_D(args.ldd ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldd) : args.stride_d),
output_op(args.epilogue),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(args.ptr_D),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
ptr_gather_A_indices(const_cast<int *>(args.ptr_gather_A_indices)),
ptr_gather_B_indices(const_cast<int *>(args.ptr_gather_B_indices)),
ptr_scatter_D_indices(const_cast<int *>(args.ptr_scatter_D_indices))
{}
/// Lightweight update given a subset of arguments.
void update(Arguments const &args)
{
CUTLASS_TRACE_HOST("GemmUniversal::Params::update()");
// Update input/output pointers
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
batch_stride_C = args.batch_stride_C;
this->batch_stride_D = args.batch_stride_D;
ptr_gather_A_indices = const_cast<int *>(args.ptr_gather_A_indices);
ptr_gather_B_indices = const_cast<int *>(args.ptr_gather_B_indices);
ptr_scatter_D_indices = const_cast<int *>(args.ptr_scatter_D_indices);
output_op = args.epilogue;
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size)
{
CUTLASS_TRACE_HOST("GemmUniversal::can_implement()");
static int const kAlignmentA = (cute::is_same<LayoutA,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (cute::is_same<LayoutA,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (cute::is_same<LayoutB,
layout::RowMajorInterleaved<32>>::value)
? 32
: (cute::is_same<LayoutB,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = (cute::is_same<LayoutC,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (cute::is_same<LayoutC,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (cute::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (cute::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (cute::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| cute::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (cute::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (cute::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (cute::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| cute::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (cute::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (cute::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (cute::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| cute::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
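  // For example, with a row-major A operand accessed kAlignmentA = 8 elements at a time
  // (e.g. 16-bit data with 128-bit vectorized loads), problem_size.k() must be a multiple of 8;
  // otherwise can_implement() returns kErrorMisalignedOperand.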
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmUniversal op;
op(params, shared_storage);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
ThreadblockSwizzle threadblock_swizzle;
run_with_swizzle(params, shared_storage, threadblock_swizzle);
}
/// Executes one GEMM with an externally-provided swizzling function
CUTLASS_DEVICE
void run_with_swizzle(Params const ¶ms, SharedStorage &shared_storage, ThreadblockSwizzle& threadblock_swizzle) {
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A,
params.ptr_gather_A_indices);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B,
params.ptr_gather_B_indices);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
params.ptr_scatter_D_indices
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
params.ptr_scatter_D_indices
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
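    // Serial split-K reduction: partition k waits for the semaphore to reach k, accumulates on
    // top of the partial results already written to D, and releases the lock with k+1 below
    // (the final partition resets it to zero for subsequent launches).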
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/gemm_universal.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm_universal.h",
"repo_id": "include",
"token_count": 9704
} | 31 |
/***************************************************************************************************
* Copyright (c) 2024 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Sparse GEMM kernel with an epilogue that computes the absolute maximum value of the output
and a pre-activation-function auxiliary output. The auxiliary output is also (optionally)
stored to global memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/params_sparse_base.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/semaphore.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
bool SplitKSerial ///! If true, code supporting split-K via serial reduction is enabled.
>
struct SparseGemmWithAbsmax {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static bool const kSplitKSerial = SplitKSerial;
static int const kSparse = Mma::kSparse;
static int const kMetaSizeInBits = Mma::kMetaSizeInBits;
static int const kMaxID2 = Mma::kMaxID2;
static int const kElementsPerElementE = Mma::kElementsPerElementE;
using ElementE = typename Mma::ElementE;
using LayoutE = typename Mma::LayoutE;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
using ParamsA = typename Mma::IteratorA::Params;
using TensorRefA = typename Mma::IteratorA::TensorRef;
using ParamsB = typename Mma::IteratorB::Params;
using TensorRefB = typename Mma::IteratorB::TensorRef;
using ParamsE = typename Mma::IteratorE::Params;
using TensorRefE = typename Mma::IteratorE::TensorRef;
using ParamsC = typename Epilogue::OutputTileIterator::Params;
using TensorRefC = typename Epilogue::OutputTileIterator::TensorRef;
using ParamsD = typename Epilogue::OutputTileIterator::Params;
using TensorRefD = typename Epilogue::OutputTileIterator::TensorRef;
using ParamsAux = typename Epilogue::AuxOutputTileIterator::Params;
using TensorRefAux = typename Epilogue::AuxOutputTileIterator::TensorRef;
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord problem_size;
TensorRefA ref_A;
TensorRefB ref_B;
TensorRefC ref_C;
TensorRefD ref_D;
TensorRefE ref_E;
TensorRefAux ref_Aux;
void* ptr_Vector;
typename LayoutC::Stride::Index ldr;
typename Epilogue::OutputOp::Params epilogue;
int split_k_slices;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments(): problem_size(0, 0, 0), split_k_slices(1) {
}
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord problem_size_,
TensorRefA ref_A_,
TensorRefB ref_B_,
TensorRefC ref_C_,
TensorRefD ref_D_,
TensorRefE ref_E_,
TensorRefAux ref_Aux_,
void* ptr_Vector_,
typename LayoutC::Stride::Index ldr_,
typename OutputOp::Params epilogue_ =
typename OutputOp::Params(),
int split_k_slices = 1
):
problem_size(problem_size_),
ref_A(ref_A_),
ref_B(ref_B_),
ref_C(ref_C_),
ref_D(ref_D_),
ref_E(ref_E_),
ref_Aux(ref_Aux_),
ptr_Vector(ptr_Vector_),
ldr(ldr_),
epilogue(epilogue_),
split_k_slices(split_k_slices) {
}
};
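  // A host-side sketch of populating Arguments (illustrative only -- the pointer names,
  // leading dimensions, and epilogue parameters are placeholders determined by the concrete
  // kernel instantiation):
  //
  //   Arguments args(
  //     {M, N, K},                          // problem_size
  //     {ptr_A, lda}, {ptr_B, ldb},         // ref_A, ref_B
  //     {ptr_C, ldc}, {ptr_D, ldd},         // ref_C, ref_D
  //     {ptr_E, lde},                       // ref_E (sparse metadata)
  //     {ptr_Aux, ld_aux},                  // ref_Aux (pre-activation auxiliary output)
  //     ptr_vector, ldr,                    // ptr_Vector, ldr
  //     typename OutputOp::Params(),        // epilogue parameters
  //     /*split_k_slices=*/1);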
/// Parameters structure
struct Params : public SparseParamsBase<
ThreadblockSwizzle, ParamsA, TensorRefA, ParamsB, TensorRefB,
ParamsE, TensorRefE> {
using Base = SparseParamsBase<
ThreadblockSwizzle, ParamsA, TensorRefA, ParamsB, TensorRefB,
ParamsE, TensorRefE>;
//
// Data members
//
ParamsC params_C;
TensorRefC ref_C;
ParamsD params_D;
TensorRefD ref_D;
ParamsAux params_Aux;
TensorRefAux ref_Aux;
void* ptr_Vector;
typename LayoutC::Stride::Index ldr;
typename OutputOp::Params output_op;
int *semaphore;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
TensorRefA ref_A,
TensorRefB ref_B,
TensorRefC ref_C,
TensorRefD ref_D,
TensorRefE ref_E,
TensorRefAux ref_Aux,
void* ptr_Vector,
typename LayoutC::Stride::Index ldr,
typename OutputOp::Params output_op = typename OutputOp::Params(),
int *workspace = nullptr
):
Base(problem_size, grid_tiled_shape, ref_A, ref_B, ref_E, Mma::Shape::kK),
params_C(ref_C.layout()),
ref_C(ref_C),
params_D(ref_D.layout()),
ref_D(ref_D),
      params_Aux(ref_Aux.layout()),
      ref_Aux(ref_Aux),
      ptr_Vector(ptr_Vector),
      ldr(ldr),
      output_op(output_op) {
semaphore = workspace;
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
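  // Note: the mainloop and epilogue phases are not live at the same time, so a union lets
  // them reuse the same shared memory rather than summing their footprints.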
//
// Methods
//
CUTLASS_HOST_DEVICE
SparseGemmWithAbsmax() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
typename Mma::IteratorE::TensorRef ref_E) {
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
static int const kAlignmentE = Mma::IteratorE::AccessType::kElements;
if (!TensorRef_aligned(ref_A, kAlignmentA)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_B, kAlignmentB)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_C, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_D, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_E, kAlignmentE)) {
return Status::kErrorMisalignedOperand;
}
if ((problem_size.m() % kAlignmentA) || ((problem_size.k() / kSparse) % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC) ||
(problem_size.m() % kAlignmentE) || ((problem_size.k() / kSparse) % kAlignmentE)) {
return Status::kErrorMisalignedOperand;
}
    // The K dimension has to be a multiple of the threadblock K because out-of-bound
    // metadata would be zero-filled by cp.async.zfill, and 0 is not valid metadata.
if (problem_size.k() % Mma::Shape::kK) {
return Status::kErrorMisalignedOperand;
}
    // The M dimension has to be a multiple of 32 (sparse float) or 16 (sparse int)
    // because of the row reordering of operand E.
static int const kAlignmentM = (sizeof(ElementE) == 2) ? 32 : 16;
if (problem_size.m() % kAlignmentM) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
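  // Callers typically validate a problem before launching; a sketch (Kernel denotes the
  // concrete instantiation of this template, not a name defined in this file):
  //
  //   if (Kernel::can_implement(problem_size, ref_A, ref_B, ref_C, ref_D, ref_E) !=
  //       Status::kSuccess) {
  //     // fall back or report an unsupported / misaligned problem
  //   }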
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size / kSparse,
};
cutlass::MatrixCoord tb_offset_B{
threadblock_tile_offset.k() * params.gemm_k_size,
threadblock_tile_offset.n() * Mma::Shape::kN
};
cutlass::MatrixCoord tb_offset_E{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size / kSparse,
};
// Problem size is a function of threadblock index in the K dimension
int problem_size_k = min(
params.problem_size.k(),
(threadblock_tile_offset.k() + 1) * params.gemm_k_size);
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - tb_offset_B.row() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A, B, and E operands
typename Mma::IteratorA iterator_A(
params.params_A,
params.ref_A.data(),
{params.problem_size.m(), problem_size_k / kSparse},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
params.ref_B.data(),
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
typename Mma::IteratorE iterator_E(
params.params_E, params.ref_E.data(),
{params.problem_size.m(),
problem_size_k / kSparse / kElementsPerElementE},
thread_idx, tb_offset_E);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
if (!kSplitKSerial || gemm_k_iterations > 0) {
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, iterator_E, accumulators);
}
//
// Epilogue
//
OutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
    // Assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
// If performing a reduction via split-K, fetch the initial synchronization
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
typename Epilogue::ElementVector *ptr_Vector = static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector);
// Move to appropriate location for this output tile
if (ptr_Vector) {
ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr;
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
params.ref_C.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ref_D.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to auxiliary destination tensor.
typename Epilogue::AuxOutputTileIterator iterator_Aux(
params.params_Aux,
// Only the final block writes the auxiliary tensor
((kSplitKSerial && params.grid_tiled_shape.k() > 1) &&
(params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1))
? nullptr
: params.ref_Aux.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op,
// Only the final block uses Vector
((kSplitKSerial && params.grid_tiled_shape.k() > 1) &&
(params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1))
? nullptr
: ptr_Vector,
iterator_D,
accumulators,
iterator_C,
iterator_Aux,
params.problem_size.mn(),
threadblock_offset);
//
// Release the semaphore
//
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
__threadfence();
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/kernel/sparse_gemm_with_absmax.h/0 | {
"file_path": "include/cutlass/gemm/kernel/sparse_gemm_with_absmax.h",
"repo_id": "include",
"token_count": 6338
} | 32 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data
layout of the global memory fragments, data types, and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting TensorOp instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/tensor_op_multiplicand_sm70.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op_sm70.h"
#include "cutlass/gemm/warp/mma_tensor_op_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<8, 8, 4>, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<8, 8, 4>;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
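  // Illustrative arithmetic (values not mandated by this specialization): with
  // Shape = GemmShape<128, 128, 32> and WarpShape = GemmShape<64, 64, 32>, WarpCount is
  // <2, 2, 1>, so kThreads = 4 * 32 = 128; with a 16-bit ElementA, each 128-bit access
  // moves 128 / 16 = 8 elements.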
//
// Shared memory layouts
//
using SmemLayoutA =
layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value>;
// Shared memory layout
using SmemLayoutB =
layout::RowMajorVoltaTensorOpMultiplicandBCongruous<
sizeof_bits<ElementB>::value>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
  /// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
SmemLayoutA,
ElementB,
SmemLayoutB,
ElementC,
LayoutC,
Policy
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<8, 8, 4>, ElementA_,
layout::RowMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<8, 8, 4>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
layout::PitchLinearShape<4, 8>,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
0,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
layout::PitchLinearShape<4, 8>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
1,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
SmemLayoutA,
ElementB,
SmemLayoutB,
ElementC,
LayoutC,
Policy
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<8, 8, 4>, ElementA_,
layout::RowMajor, ElementB_, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<8, 8, 4>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK>;
// Shared memory layout
using SmemLayoutB = layout::RowMajorVoltaTensorOpMultiplicandBCongruous<
sizeof_bits<ElementB>::value>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
layout::PitchLinearShape<4, 8>,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
0,
IteratorThreadMapA
>;
  /// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
SmemLayoutA,
ElementB,
SmemLayoutB,
ElementC,
LayoutC,
Policy
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<8, 8, 4>, ElementA_,
layout::ColumnMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<8, 8, 4>;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
layout::PitchLinearShape<4, 8>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
1,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
SmemLayoutA,
ElementB,
SmemLayoutB,
ElementC,
LayoutC,
Policy
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/threadblock/default_mma_core_sm70.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/default_mma_core_sm70.h",
"repo_id": "include",
"token_count": 6940
} | 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_gaussian_complex_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex transform on B operand
ComplexTransform TransformB = ComplexTransform::kNone,
/// Do source operands need more than one elements
bool GeneralizedOperatorElements = false,
/// Used for partial specialization
typename Enable = bool
>
class MmaGaussianComplexTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex using real-valued TensorOps
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB
>
class MmaGaussianComplexTensorOp<
Shape_,
complex<RealElementA>,
LayoutA_,
complex<RealElementB>,
LayoutB_,
complex<RealElementC>,
LayoutC_,
Policy_,
TransformA,
TransformB> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Underlying arch tag
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Indicates math operator
using MathOperator = arch::OpMultiplyAddGaussianComplex;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA = FragmentA;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpGaussianComplexAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
  /// Storage for the C tile (the accumulator). Note that regardless of the multiplicand type, this
  /// storage arrangement is considered 'gaussian complex' in the sense that the accumulation is
  /// done in three parts, namely part1, part2, and part3. Parts 1, 2, and 3 are stored consecutively
  /// in IteratorC::Fragment. This matches the structure of Tensor Cores, which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
static_assert(
FragmentC::kElements == 3 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements,
"Unexpected gaussian complex fragment length.");
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaGaussianComplexTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C
) const {
// Alias types for underlying real-valued matrix multiply operator
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
    static_assert(MmaOperandA::kElements == 1,
      "This implementation only supports math instructions in which exactly one element is needed for the A operand. "
      "We can generalize later.");
    static_assert(MmaOperandB::kElements == 1,
      "This implementation only supports math instructions in which exactly one element is needed for the B operand. "
      "We can generalize later.");
D = C;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
// mma(accum.part1(), (a.real() + a.imag()), b.real(), accum.part1());
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Asum;
MmaOperandB operand_Br;
operand_Asum[0] = A[m].real() + ((kTransformA == ComplexTransform::kConjugate) ? -A[m].imag() : +A[m].imag());
operand_Br[0] = B[n].real();
// accumulator part1
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_Asum, operand_Br, *accum);
}
// mma(accum.part2(), -a.real(), (b.real() - b.imag()), accum.part2());
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Ar;
MmaOperandB operand_Bdiff;
operand_Ar[0] = -A[m].real();
operand_Bdiff[0] = B[n].real() - ((kTransformB == ComplexTransform::kConjugate) ? -B[n].imag() : +B[n].imag());
// accumulator part2
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_Ar, operand_Bdiff, *accum);
}
// mma(accum.part3(), a.imag(), (b.real() + b.imag()), accum.part3())
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Ai;
MmaOperandB operand_Bsum;
operand_Ai[0] = (kTransformA == ComplexTransform::kConjugate) ? -A[m].imag() : +A[m].imag();
operand_Bsum[0] = B[n].real() + ((kTransformB == ComplexTransform::kConjugate) ? -B[n].imag() : +B[n].imag());
// accumulator part3
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + 2 * MmaIterations::kCount;
mma(*accum, operand_Ai, operand_Bsum, *accum);
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
dst_A = A;
dst_B = B;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex using real-valued TensorOps
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB
>
class MmaGaussianComplexTensorOp<
Shape_,
complex<RealElementA>,
LayoutA_,
complex<RealElementB>,
LayoutB_,
complex<RealElementC>,
LayoutC_,
Policy_,
TransformA,
TransformB,
true> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Underlying arch tag
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Indicates math operator
using MathOperator = arch::OpMultiplyAddGaussianComplex;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA = FragmentA;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpGaussianComplexAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
  /// Storage for the C tile (the accumulator). Note that regardless of the multiplicand type, this
  /// storage arrangement is considered 'gaussian complex' in the sense that the accumulation is
  /// done in three parts, namely part1, part2, and part3. Parts 1, 2, and 3 are stored consecutively
  /// in IteratorC::Fragment. This matches the structure of Tensor Cores, which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
static_assert(
FragmentC::kElements == 3 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements,
"Unexpected gaussian complex fragment length.");
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaGaussianComplexTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C
) const {
// Alias types for underlying real-valued matrix multiply operator
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
D = C;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
// mma(accum.part1(), (a.real() + a.imag()), b.real(), accum.part1());
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Asum;
MmaOperandB operand_Br;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_Asum[mk] = A[m*MmaOperandA::kElements + mk].real() + ((kTransformA == ComplexTransform::kConjugate) ?
-A[m*MmaOperandA::kElements + mk].imag() : +A[m*MmaOperandA::kElements + mk].imag());
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_Br[nk] = B[n*MmaOperandB::kElements + nk].real();
// accumulator part1
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_Asum, operand_Br, *accum);
}
// mma(accum.part2(), -a.real(), (b.real() - b.imag()), accum.part2());
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Ar;
MmaOperandB operand_Bdiff;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_Ar[mk] = -A[m*MmaOperandA::kElements + mk].real();
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_Bdiff[nk] = B[n*MmaOperandB::kElements + nk].real() - ((kTransformB == ComplexTransform::kConjugate) ?
-B[n*MmaOperandB::kElements + nk].imag() : +B[n*MmaOperandB::kElements + nk].imag());
// accumulator part2
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_Ar, operand_Bdiff, *accum);
}
// mma(accum.part3(), a.imag(), (b.real() + b.imag()), accum.part3())
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Ai;
MmaOperandB operand_Bsum;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_Ai[mk] = (kTransformA == ComplexTransform::kConjugate) ?
-A[m*MmaOperandA::kElements + mk].imag() : +A[m*MmaOperandA::kElements + mk].imag();
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_Bsum[nk] = B[n*MmaOperandB::kElements + nk].real() + ((kTransformB == ComplexTransform::kConjugate) ?
-B[n*MmaOperandB::kElements + nk].imag() : +B[n*MmaOperandB::kElements + nk].imag());
// accumulator part3
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + 2 * MmaIterations::kCount;
mma(*accum, operand_Ai, operand_Bsum, *accum);
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
dst_A = A;
dst_B = B;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op.h",
"repo_id": "include",
"token_count": 7585
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for loading 128b vectors of 64b elements.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicandCongruous64b,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
static_assert(!(Shape::kContiguous % 16) && !(Shape::kStrided % 4), "Divisibility.");
static_assert(sizeof_bits<Element_>::value == 64, "This is specialized for 64b accesses.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicandCongruous64b;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Load two elements per access
static int const kElementsPerAccess = 2;
/// Policy defining internal details of tile iterator
struct Policy {
/// Shape of one access
using Delta = layout::PitchLinearShape<8, 4>;
/// Number of iterations to load
using Iterations = layout::PitchLinearShape<
Shape::kContiguous / kElementsPerAccess / Delta::kContiguous,
InstructionShape::kStrided / Delta::kStrided
>;
};
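// Worked example of the policy above (assumed instantiation, for illustration
// only): with Shape::kContiguous == 64 and InstructionShape::kStrided == 8,
// Iterations = PitchLinearShape<64 / 2 / 8, 8 / 4> = <4, 2>, i.e. each thread
// issues 4 x 2 = 8 two-element (128b) accesses, matching the 16-element
// Fragment defined below.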
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, kElementsPerAccess, 16>;
/// Internal counter used to jump to next K partition
int k_group_idx_;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0), k_group_idx_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0),
k_group_idx_(0) {
int access_strided = lane_id / Policy::Delta::kContiguous;
int access_contiguous = (lane_id % Policy::Delta::kContiguous) ^ access_strided;
pointer_ = reinterpret_cast<AccessType const *>(ref.data()) +
access_contiguous + access_strided * stride_;
}
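// Lane-mapping example (illustrative, assuming lane_id == 13):
// access_strided = 13 / 8 = 1 and access_contiguous = (13 % 8) ^ 1 = 4, so
// this lane starts at pointer_ + 4 + 1 * stride_. The XOR with access_strided
// applies the swizzle of the congruous 64b layout, which spreads a warp's
// 128b accesses across shared memory banks.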
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof(Element);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int offset =
(tile_offset.strided() * InstructionShape::kStrided) * stride_ * kElementsPerAccess +
tile_offset.contiguous() * Shape::kContiguous;
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
add_tile_offset({0, 1});
return *this;
}
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
add_tile_offset({0, -1});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::Iterations::kContiguous; ++c) {
int access_idx = c + s * Policy::Iterations::kContiguous;
AccessType const *source_ptr = pointer_ +
Policy::Delta::kContiguous * c +
Policy::Delta::kStrided * s * stride_;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr);
fetch_ptr[access_idx] = *source;
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess +
tile_offset.strided() * InstructionShape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
}
};
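//
// Minimal usage sketch (illustrative only; the template arguments and names
// below are assumptions, not prescribed by this header). A warp-level MMA
// typically constructs this iterator over a shared memory tile, loads a
// fragment, and advances along the k dimension:
//
//   using TileIterator = MmaTensorOpMultiplicandTileIterator<
//       layout::PitchLinearShape<64, 16>, Operand::kA, double,
//       layout::TensorOpMultiplicandCongruous64b,
//       layout::PitchLinearShape<16, 8>, 1, 32, 1>;
//
//   TileIterator iter(smem_ref, lane_id);  // smem_ref: TensorRef to shared memory
//   TileIterator::Fragment frag;
//   iter.load(frag);                       // this thread's part of the first k-group
//   ++iter;                                // step to the next k-group
//   iter.load(frag);
//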
////////////////////////////////////////////////////////////////////////////////
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::TensorOpMultiplicandCongruous64b,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.strided(), tile_offset.contiguous()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It adapts the congruous 64b iterator
/// above to a column-major layout, loading 128b vectors of 64b elements with direct shared memory
/// accesses (not LDSM), and therefore must be initialized with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::TensorOpMultiplicandCongruous64b,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.contiguous(), tile_offset.strided()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicand64bCrosswise,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
static_assert(!(Shape::kContiguous % 4) && !(Shape::kStrided % 16), "Divisibility.");
static_assert(sizeof_bits<Element_>::value == 64, "This is specialized for 64b accesses.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicand64bCrosswise;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Load two elements per access
static int const kElementsPerAccess = 2;
/// Policy defining internal details of tile iterator
struct Policy {
/// Shape of one access
using Delta = layout::PitchLinearShape<4, 16>;
/// Number of iterations to load
using Iterations = layout::PitchLinearShape<
InstructionShape::kContiguous / Delta::kContiguous,
Shape::kStrided / Delta::kStrided
>;
};
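// Worked example of the policy above (assumed instantiation, for illustration
// only): with InstructionShape::kContiguous == 16 and Shape::kStrided == 32,
// Iterations = PitchLinearShape<16 / 4, 32 / 16> = <4, 2>, again eight
// two-element (128b) accesses per thread for a 16-element Fragment.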
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, kElementsPerAccess, 16>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kStrided * InstructionShape::kContiguous / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
/// Internal counter for tracking K-group
Index k_group_idx_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0), k_group_idx_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0),
k_group_idx_(0) {
int access_strided = lane_id / 8;
int access_contiguous = (lane_id % 8);
byte_offset_ = (access_contiguous + access_strided * stride_) * sizeof(AccessType);
pointer_ = reinterpret_cast<AccessType const *>(ref.data());
}
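// Lane-mapping example (illustrative, assuming lane_id == 13):
// access_strided = 13 / 8 = 1 and access_contiguous = 13 % 8 = 5, so the
// initial byte offset is (5 + stride_) * sizeof(AccessType) =
// (5 + stride_) * 16 bytes from ref.data(). As the iterator steps across
// k-groups, it XORs this byte offset with 0x40 to follow the crosswise swizzle.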
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
pointer_ += offset / kElementsPerAccess;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int offset = (tile_offset.contiguous() * InstructionShape::kContiguous) *
stride_ * kElementsPerAccess +
tile_offset.strided() * Shape::kStrided;
add_pointer_offset(offset);
int old_k_group_idx = k_group_idx_;
k_group_idx_ += tile_offset.contiguous();
if ((k_group_idx_ & 2) ^ (old_k_group_idx & 2)) {
byte_offset_ ^= 0x40;
}
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(TensorCoord const &tile_offset) {
// TODO: fix this if it becomes an issue during warp iterator reset
add_tile_offset(tile_offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
pointer_ += stride_ * InstructionShape::kContiguous;
if (k_group_idx_ & 0x1) {
// xor ptr
byte_offset_ ^= 0x40;
}
++k_group_idx_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::Iterations::kStrided; ++s) {
int access_idx = c + s * Policy::Iterations::kContiguous;
AccessType const *source_ptr = pointer_ +
Policy::Delta::kContiguous * c * stride_ +
Policy::Delta::kStrided * s / kElementsPerAccess;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr);
fetch_ptr[access_idx] = *source;
}
}
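// For odd k-groups the crosswise swizzle delivers the two 64b halves of each
// 128b access in swapped order, so swap adjacent fragment elements back into
// canonical order after the loads complete.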
Element *exchange_ptr = reinterpret_cast<Element *>(&frag);
if (k_group_idx_ & 1) {
// exchange on 64b granularity
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Fragment::kElements; i += 2) {
Element tmp = exchange_ptr[i];
exchange_ptr[i] = exchange_ptr[i + 1];
exchange_ptr[i + 1] = tmp;
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset = tile_offset.contiguous() *
InstructionShape::kContiguous /
Layout::kElementsPerAccess +
tile_offset.strided() * Shape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
k_group_idx_ = k_group;
}
};
////////////////////////////////////////////////////////////////////////////////
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::TensorOpMultiplicand64bCrosswise,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(TensorCoord const &tile_offset) {
iterator_.add_tile_offset_negative({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.strided(), tile_offset.contiguous()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::TensorOpMultiplicand64bCrosswise,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(TensorCoord const &tile_offset) {
iterator_.add_tile_offset_negative({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.contiguous(), tile_offset.strided()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for canonical matrix layouts
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Operand identity
Operand Operand_,
/// Data type of elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads = 32,
/// Number of partitions along K dimension
int PartitionsK_ = 1>
class MmaTensorOpMultiplicandTileIteratorCanonical {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
/// Basic check
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Number of elements accessed per Shared Memory load
static int const kElementsPerAccess =
(sizeof_bits<Element>::value >= 32 ? 1 : 32 / sizeof_bits<Element>::value);
private:
static int const kWarpShapeOuter =
(kOperand == Operand::kA ? Shape::kRow : Shape::kColumn);
static int const kWarpShapeInner =
(kOperand == Operand::kA ? Shape::kColumn : Shape::kRow);
/// Rounded up instruction counts
using InstructionCount = MatrixShape<
Shape::kRow / InstructionShape::kRow,
Shape::kColumn / InstructionShape::kColumn
>;
/// Rounded up tile dimensions
using WarpShapeDivisible = MatrixShape<
InstructionCount::kRow * InstructionShape::kRow,
InstructionCount::kColumn * InstructionShape::kColumn
>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<
Element,
WarpShapeDivisible::kRow * WarpShapeDivisible::kColumn / kThreads
>;
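// Example (assumed shapes, for illustration only): with Shape =
// MatrixShape<16, 8> and InstructionShape = MatrixShape<8, 4>,
// InstructionCount = <2, 2>, WarpShapeDivisible = <16, 8>, and each thread's
// Fragment holds 16 * 8 / 32 = 4 elements.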
/// Memory access type
using AccessType = AlignedArray<Element, kElementsPerAccess>;
private:
/// Underlying tensor reference
TensorRef ref_;
/// Extent of tensor
MatrixCoord extent_;
/// Origin
MatrixCoord origin_;
/// Used to conditionally enable extents checking
bool divisible_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical(): divisible_(true) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical(
TensorRef const &ref,
int lane_id
): ref_(ref), extent_(Shape::kRow, Shape::kColumn), divisible_(true) {
if (kOperand == Operand::kA) {
origin_ = MatrixCoord(lane_id / 4, (lane_id % 4) * kElementsPerAccess);
}
else {
origin_ = MatrixCoord((lane_id % 4) * kElementsPerAccess, lane_id / 4);
}
ref_.add_coord_offset(origin_);
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical(
TensorRef const &ref,
TensorCoord extent,
int lane_id
): ref_(ref), extent_(extent), divisible_(false) {
if (kOperand == Operand::kA) {
origin_ = MatrixCoord(lane_id / 4, (lane_id % 4) * kElementsPerAccess);
}
else {
origin_ = MatrixCoord((lane_id % 4) * kElementsPerAccess, lane_id / 4);
}
ref_.add_coord_offset(origin_);
}
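// Origin example (illustrative, assuming Element = double so
// kElementsPerAccess == 1, kOperand == Operand::kA, and lane_id == 13):
// origin_ = MatrixCoord(13 / 4, (13 % 4) * 1) = (3, 1), consistent with the
// 8 x 4-thread operand footprint of an mma.sync Tensor Op.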
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical &add_tile_offset(TensorCoord const &tile_offset) {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
origin_ += coord_offset;
ref_.add_coord_offset(coord_offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical & operator++() {
if (kOperand == Operand::kA) {
add_tile_offset({0, 1});
}
else {
add_tile_offset({1, 0});
}
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical & operator--() {
if (kOperand == Operand::kA) {
add_tile_offset({0, -1});
}
else {
add_tile_offset({-1, 0});
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
int const kWarpShapeDivisibleInner =
(kOperand == Operand::kA ? WarpShapeDivisible::kColumn : WarpShapeDivisible::kRow);
// Take advantage of Tensor Op's 8 x 4T access pattern
int const kAccessesInner = (kWarpShapeDivisibleInner / kElementsPerAccess) / 4;
AccessType *access_ptr = reinterpret_cast<AccessType *>(&frag);
if (kOperand == Operand::kA) {
int const kTilesPerInstruction = InstructionShape::kRow / 8;
CUTLASS_PRAGMA_UNROLL
for (int inst_m_idx = 0; inst_m_idx < InstructionCount::kRow; ++inst_m_idx) {
CUTLASS_PRAGMA_UNROLL
for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
CUTLASS_PRAGMA_UNROLL
for (int access_m_idx = 0; access_m_idx < kTilesPerInstruction; ++access_m_idx) {
int access_idx =
access_m_idx + kTilesPerInstruction * (inner_idx + kAccessesInner * inst_m_idx);
MatrixCoord offset(
access_m_idx * 8 + inst_m_idx * InstructionShape::kRow,
inner_idx * 4 * kElementsPerAccess);
MatrixCoord access_coord = origin_ + offset;
if (divisible_ ||
(access_coord.row() < extent_.row() && access_coord.column() < extent_.column())) {
access_ptr[access_idx] = *reinterpret_cast<AccessType const *>(
ref_.data() + ref_.offset(offset));
}
else {
AccessType zero;
zero.clear();
access_ptr[access_idx] = zero;
}
}
}
}
}
else {
CUTLASS_PRAGMA_UNROLL
for (int inst_n_idx = 0; inst_n_idx < InstructionCount::kColumn; ++inst_n_idx) {
CUTLASS_PRAGMA_UNROLL
for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
int access_idx = inner_idx + kAccessesInner * inst_n_idx;
MatrixCoord offset(
inner_idx * 4 * kElementsPerAccess,
inst_n_idx * 8);
MatrixCoord access_coord = origin_ + offset;
if (divisible_ ||
(access_coord.row() < extent_.row() && access_coord.column() < extent_.column())) {
access_ptr[access_idx] = *reinterpret_cast<AccessType const *>(
ref_.data() + ref_.offset(offset));
}
else {
AccessType zero;
zero.clear();
access_ptr[access_idx] = zero;
}
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
load_with_pointer_offset(frag, byte_offset * 8 / sizeof_bits<Element>::value);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset) + pointer_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset) + byte_offset * 8 / sizeof_bits<Element>::value);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation
}
};
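//
// Usage note (illustrative only; the alias and variable names below are
// assumptions, not defined by this header). The canonical iterator offers two
// constructors: the (ref, lane_id) form assumes the warp tile is fully in
// bounds, while the (ref, extent, lane_id) form records an extent and
// zero-fills any access that falls outside it. The ColumnMajor and RowMajor
// wrappers below expose both forms:
//
//   CanonicalIterator iter_full(ref, lane_id);                    // unpredicated
//   CanonicalIterator iter_edge(ref, MatrixCoord(m, k), lane_id); // zero-fills out-of-bounds accesses
//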
/// Wrapper for ColumnMajor
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajor,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIteratorCanonical<
Shape, kOperand, Element,
layout::ColumnMajor,
InstructionShape,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
TensorCoord const & extent,
int lane_id
): iterator_({ref.data(), ref.stride()}, extent, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.contiguous(), tile_offset.strided()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
/// Wrapper for RowMajor
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajor,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
  static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIteratorCanonical<
Shape, kOperand, Element,
layout::RowMajor,
InstructionShape,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
TensorCoord const &extent,
int lane_id
): iterator_({ref.data(), ref.stride()}, extent, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
  /// Advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
  /// Advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.contiguous(), tile_offset.strided()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h",
"repo_id": "include",
"token_count": 25010
} | 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines layout functions used by TensorRef and derived classes.
Layout functions map logical coordinates to linear memory. They often require additional
data to describe strides between elements.
Layout functions must implement all members in the public interface of IdentityTensorLayout<>
defined in cutlass/tensor_ref.h.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/pitch_linear_coord.h"
namespace cutlass {
namespace layout {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Defines data layouts of various matrix formats usable by TensorRef and other classes.
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for row-major matrices.
class RowMajor {
public:
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
RowMajor(LongIndex ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
RowMajor(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajor packed(MatrixCoord const &extent) {
return RowMajor(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return LongIndex(coord.row()) * LongIndex(stride_[0]) + coord.column();
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
return MatrixCoord(Index(offset / stride_[0]), Index(offset % stride_[0]));
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return LongIndex(extent.row()) * LongIndex(stride_[0]);
}
};
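// Illustrative usage sketch (added for exposition; not part of the original header).
// Demonstrates the RowMajor mapping above on a small 4x8 matrix; the function name
// example_row_major_usage is hypothetical.
CUTLASS_HOST_DEVICE
bool example_row_major_usage() {
  RowMajor layout = RowMajor::packed(MatrixCoord(4, 8));  // leading dimension = 8 columns
  bool ok = (layout(MatrixCoord(2, 3)) == 2 * 8 + 3);     // offset 19
  ok = ok && (layout.inverse(19) == MatrixCoord(2, 3));   // inverse round-trips the offset
  ok = ok && (layout.capacity(MatrixCoord(4, 8)) == 32);  // rows * leading dimension
  return ok;
}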
/// Mapping function for column-major matrices.
class ColumnMajor {
public:
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajor(LongIndex ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajor(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajor packed(MatrixCoord const &extent) {
return ColumnMajor(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return LongIndex(coord.column()) * LongIndex(stride_[0]) + coord.row();
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
return MatrixCoord(Index(offset % stride_[0]), Index(offset / stride_[0]));
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return LongIndex(extent.column()) * LongIndex(stride_[0]);
}
};
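// Illustrative usage sketch (added for exposition; not part of the original header).
// The same logical coordinate as in the RowMajor example maps to a different linear
// offset under ColumnMajor; the function name is hypothetical.
CUTLASS_HOST_DEVICE
bool example_column_major_usage() {
  ColumnMajor layout = ColumnMajor::packed(MatrixCoord(4, 8));  // leading dimension = 4 rows
  bool ok = (layout(MatrixCoord(2, 3)) == 3 * 4 + 2);           // offset 14
  ok = ok && (layout.inverse(14) == MatrixCoord(2, 3));
  ok = ok && (layout.capacity(MatrixCoord(4, 8)) == 32);        // columns * leading dimension
  return ok;
}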
/// Mapping function for interleaved matrices. Matrix is structured
/// as row-major arrangement of fixed-size columns.
template <int Interleave>
struct RowMajorInterleaved {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
  /// Number of rows interleaved together (the height of each fixed-size column piece)
static int const kInterleave = Interleave;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorInterleaved(LongIndex ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorInterleaved(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorInterleaved packed(MatrixCoord const &extent) {
return RowMajorInterleaved(extent.column() * kInterleave);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
Index row_major = coord.row() / kInterleave;
Index row_minor = coord.row() % kInterleave;
return LongIndex(row_major) * LongIndex(stride_[0]) + LongIndex(coord.column()) * kInterleave + row_minor;
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
Index row_major = Index(offset / stride_[0]);
Index residual = Index(offset % stride_[0]);
Index column = residual / kInterleave;
Index row_minor = residual % kInterleave;
return MatrixCoord(row_major * kInterleave + row_minor, column);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return (extent.row() + kInterleave - 1) / kInterleave * stride_[0];
}
};
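// Illustrative usage sketch (added for exposition; not part of the original header).
// With RowMajorInterleaved<4>, groups of 4 consecutive rows of the same column are
// stored contiguously; the function name is hypothetical.
CUTLASS_HOST_DEVICE
bool example_row_major_interleaved_usage() {
  RowMajorInterleaved<4> layout = RowMajorInterleaved<4>::packed(MatrixCoord(8, 8));  // stride = 8 columns * 4 = 32
  // coord (5, 2): row block 1, row-in-block 1 -> 1 * 32 + 2 * 4 + 1 = 41
  bool ok = (layout(MatrixCoord(5, 2)) == 41);
  ok = ok && (layout.inverse(41) == MatrixCoord(5, 2));
  ok = ok && (layout.capacity(MatrixCoord(8, 8)) == 64);  // 2 row blocks * stride
  return ok;
}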
/// Mapping function for interleaved matrices. Matrix is structured
/// as column-major arrangement of fixed-size rows.
template <int Interleave>
struct ColumnMajorInterleaved {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
  /// Number of columns interleaved together (the width of each fixed-size row piece)
static int const kInterleave = Interleave;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorInterleaved(LongIndex ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorInterleaved(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorInterleaved packed(MatrixCoord const &extent) {
return ColumnMajorInterleaved(extent.row() * kInterleave);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
Index column_major = coord.column() / kInterleave;
Index column_minor = coord.column() % kInterleave;
return LongIndex(column_major) * LongIndex(stride_[0]) + LongIndex(coord.row()) * kInterleave + column_minor;
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
Index column_major = Index(offset / stride_[0]);
Index residual = Index(offset % stride_[0]);
Index row = residual / kInterleave;
Index column_minor = residual % kInterleave;
return MatrixCoord(row, column_major * kInterleave + column_minor);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return (extent.column() + kInterleave - 1) / kInterleave * stride_[0];
}
};
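// Illustrative usage sketch (added for exposition; not part of the original header).
// ColumnMajorInterleaved<4> is the transpose-symmetric case: groups of 4 consecutive
// columns of the same row are stored contiguously; the function name is hypothetical.
CUTLASS_HOST_DEVICE
bool example_column_major_interleaved_usage() {
  ColumnMajorInterleaved<4> layout = ColumnMajorInterleaved<4>::packed(MatrixCoord(8, 8));  // stride = 8 rows * 4 = 32
  // coord (2, 5): column block 1, column-in-block 1 -> 1 * 32 + 2 * 4 + 1 = 41
  bool ok = (layout(MatrixCoord(2, 5)) == 41);
  ok = ok && (layout.inverse(41) == MatrixCoord(2, 5));
  ok = ok && (layout.capacity(MatrixCoord(8, 8)) == 64);
  return ok;
}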
/// Enumerated type for canonical pitch-linear matrix layouts
enum class Matrix {
kColumnMajor, ///< leading dimension refers to stride between columns; stride along rows is 1
kRowMajor ///< leading dimension refers to stride between rows; stride along columns is 1
};
/// Mapping function for scenario in which layout is row-major or column-major but this information
/// is only available at runtime.
struct ContiguousMatrix {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
/// Enumerated type indicating canonical matrix layout
Matrix layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ContiguousMatrix(
Index ldm = 0,
Matrix layout = Matrix::kColumnMajor
):
stride_(ldm), layout_(layout) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ContiguousMatrix packed(
MatrixCoord const &extent,
Matrix layout = Matrix::kColumnMajor) {
Index ldm = 0;
if (layout == Matrix::kColumnMajor) {
ldm = extent.row();
}
else if (layout == Matrix::kRowMajor) {
ldm = extent.column();
}
return ContiguousMatrix(ldm, layout);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
if (layout_ == Matrix::kColumnMajor) {
return coord.row() + coord.column() * stride_[0];
}
else if (layout_ == Matrix::kRowMajor) {
return coord.row() * stride_[0] + coord.column();
}
else {
// degenerate case
return 0;
}
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
CUTLASS_UNUSED(offset);
return MatrixCoord(0, 0);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
if (layout_ == Matrix::kColumnMajor) {
return stride_[0] * extent.column();
}
else if (layout_ == Matrix::kRowMajor) {
return stride_[0] * extent.row();
}
else {
// degenerate case
return 0;
}
}
};
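// Illustrative usage sketch (added for exposition; not part of the original header).
// ContiguousMatrix selects row-major or column-major at runtime, so the same
// coordinate maps differently depending on layout_id; the function name is hypothetical.
CUTLASS_HOST_DEVICE
bool example_contiguous_matrix_usage() {
  ContiguousMatrix row_major = ContiguousMatrix::packed(MatrixCoord(4, 8), Matrix::kRowMajor);     // ldm = 8
  ContiguousMatrix col_major = ContiguousMatrix::packed(MatrixCoord(4, 8), Matrix::kColumnMajor);  // ldm = 4
  bool ok = (row_major(MatrixCoord(2, 3)) == 2 * 8 + 3);   // 19
  ok = ok && (col_major(MatrixCoord(2, 3)) == 3 * 4 + 2);  // 14
  ok = ok && (row_major.capacity(MatrixCoord(4, 8)) == 32);
  return ok;
}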
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for scenario in which both rows and columns are separated by a stride.
template <int Rank>
struct AffineRankN {
/// Logical rank of tensor
static int const kRank = Rank;
/// Rank of stride vector
static int const kStrideRank = kRank;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = Coord<kRank, Index>;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
AffineRankN(
Stride const &stride = Stride()
):
stride_(stride) { }
/// Ctor
CUTLASS_HOST_DEVICE
AffineRankN(
Coord<kRank/2, LongIndex> const &stride_m,
Coord<kRank/2, LongIndex> const &stride_n
) {
// Concatenate the strides
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kRank/2; ++m) {
stride_[m] = stride_m[m];
}
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kRank/2; ++n) {
stride_[n + kRank/2] = stride_n[n];
}
}
/// Ctor for N = 2
CUTLASS_HOST_DEVICE
AffineRankN(
LongIndex const &stride_m,
LongIndex const &stride_n
) {
stride_[0] = stride_m;
stride_[1] = stride_n;
}
/// Ctor for N = 2
CUTLASS_HOST_DEVICE
AffineRankN(
LongIndex const &stride
) {
stride_[0] = stride;
stride_[1] = 1;
}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static AffineRankN packed(TensorCoord const &extent) {
AffineRankN layout;
layout.stride_[kRank - 1] = 1;
CUTLASS_PRAGMA_UNROLL
for (int i = kRank - 1; i > 0; --i) {
layout.stride_[i - 1] = layout.stride_[i] * extent[i];
}
return layout;
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return dot(coord, stride_);
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
return TensorCoord();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
int idx = stride_.max_dim_index();
return extent[idx] * stride_[idx];
}
};
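// Illustrative usage sketch (added for exposition; not part of the original header).
// AffineRankN keeps an explicit stride per dimension and maps a coordinate with a dot
// product, so non-unit strides in both dimensions are expressible; hypothetical name.
CUTLASS_HOST_DEVICE
bool example_affine_rankN_usage() {
  AffineRankN<2> layout = AffineRankN<2>::packed(make_Coord(4, 8));  // strides (8, 1), i.e. packed row-major
  bool ok = (layout(make_Coord(2, 3)) == 2 * 8 + 3);                 // dot((2,3), (8,1)) = 19
  layout.stride(0) = 10;                                             // rows now 10 elements apart
  layout.stride(1) = 2;                                              // columns now 2 elements apart
  ok = ok && (layout(make_Coord(2, 3)) == 2 * 10 + 3 * 2);           // 26
  return ok;
}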
/// Mapping function for scenario in which both rows and columns are separated by a stride.
/// Row stride is smaller than column stride in AffineRank2ColumnMajor.
struct AffineRank2ColumnMajor {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 2;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2ColumnMajor(
Stride const &stride = Stride()
):
stride_(stride) { }
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2ColumnMajor(
LongIndex row_stride, ///< stride between elements in consecutive rows
LongIndex column_stride ///< stride between elements in consecutive columns
)
{ stride_[0] = row_stride; stride_[1] = column_stride;}
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2ColumnMajor(
LongIndex stride
)
{ stride_[0] = 1; stride_[1] = stride;}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static AffineRank2ColumnMajor packed(MatrixCoord const &extent) {
return AffineRank2ColumnMajor(1, extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return dot(coord, stride_);
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
CUTLASS_UNUSED(offset);
return MatrixCoord(0, 0);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return extent.column() * stride_[1];
}
};
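// Illustrative usage sketch (added for exposition; not part of the original header).
// A packed AffineRank2ColumnMajor reproduces the ColumnMajor mapping with both strides
// stored explicitly; the function name is hypothetical.
CUTLASS_HOST_DEVICE
bool example_affine_rank2_column_major_usage() {
  AffineRank2ColumnMajor layout = AffineRank2ColumnMajor::packed(MatrixCoord(4, 8));  // strides (1, 4)
  bool ok = (layout(MatrixCoord(2, 3)) == 2 * 1 + 3 * 4);  // 14
  ok = ok && (layout.capacity(MatrixCoord(4, 8)) == 32);   // columns * column_stride
  return ok;
}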
/// Mapping function for scenario in which both rows and columns are separated by a stride.
/// Column stride is smaller than row stride in AffineRank2RowMajor.
struct AffineRank2RowMajor {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 2;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2RowMajor(
Stride const &stride = Stride()
):
stride_(stride) { }
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2RowMajor(
LongIndex row_stride, ///< stride between elements in consecutive rows
LongIndex column_stride ///< stride between elements in consecutive columns
) { stride_[0] = row_stride; stride_[1] = column_stride;}
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2RowMajor(
LongIndex stride
) { stride_[0] = stride; stride_[1] = 1;}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static AffineRank2RowMajor packed(MatrixCoord const &extent) {
    // Packed row-major affine layout: rows are extent.column() elements apart, columns are adjacent.
    return AffineRank2RowMajor(extent.column(), 1);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return dot(coord, stride_);
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
CUTLASS_UNUSED(offset);
return MatrixCoord(0, 0);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return extent.row() * stride_[0];
}
};
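// Illustrative usage sketch (added for exposition; not part of the original header).
// Shows AffineRank2RowMajor with explicit strides, e.g. a row-major view of a padded
// allocation whose rows are 10 elements apart; the function name is hypothetical.
CUTLASS_HOST_DEVICE
bool example_affine_rank2_row_major_usage() {
  AffineRank2RowMajor layout(10, 1);                        // row_stride = 10, column_stride = 1
  bool ok = (layout(MatrixCoord(2, 3)) == 2 * 10 + 3 * 1);  // 23
  ok = ok && (layout.capacity(MatrixCoord(4, 8)) == 4 * 10);
  return ok;
}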
/////////////////////////////////////////////////////////////////////////////////////////////////
// Utility functions to convert stride_factor to the strides used by the Affine2 layout.
//
// stride_factor is the logical distance between two adjacent coordinates.
//
// All coordinates used here are matrix coordinates. stride[0] and extent[0] are for the
// rows. stride[1] and extent[1] are for the columns.
template <typename Affine2Layout>
struct Affine2Layout_Factory {
CUTLASS_HOST_DEVICE
static Affine2Layout layout_factory(cutlass::Coord<2> const &extent, typename Affine2Layout::Stride stride_factor) {
return Affine2Layout::packed(extent);
}
};
template <>
struct Affine2Layout_Factory<cutlass::layout::AffineRank2ColumnMajor> {
CUTLASS_HOST_DEVICE
static cutlass::layout::AffineRank2ColumnMajor layout_factory(
cutlass::Coord<2> const &extent,
typename cutlass::layout::AffineRank2ColumnMajor::Stride stride_factor) {
return cutlass::layout::AffineRank2ColumnMajor({ stride_factor[0], stride_factor[0] * stride_factor[1] * extent[0] });
}
};
template <>
struct Affine2Layout_Factory<cutlass::layout::AffineRank2RowMajor> {
CUTLASS_HOST_DEVICE
static cutlass::layout::AffineRank2RowMajor layout_factory(
cutlass::Coord<2> const &extent,
typename cutlass::layout::AffineRank2RowMajor::Stride stride_factor) {
return cutlass::layout::AffineRank2RowMajor({ stride_factor[0] * stride_factor[1] * extent[1], stride_factor[1] });
}
};
// The base layout cutlass::layout::AffineRankN<2> is similar to AffineRank2ColumnMajor
template <>
struct Affine2Layout_Factory<cutlass::layout::AffineRankN<2>> {
CUTLASS_HOST_DEVICE
static cutlass::layout::AffineRankN<2> layout_factory(
cutlass::Coord<2> const &extent,
typename cutlass::layout::AffineRankN<2>::Stride stride_factor) {
return cutlass::layout::AffineRankN<2>({ stride_factor[0], stride_factor[0] * stride_factor[1] * extent[0] });
}
};
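// Illustrative usage sketch (added for exposition; not part of the original header).
// A stride_factor of (1, 1) reproduces the packed strides; larger factors space the
// packed strides out by the given amounts. The function name is hypothetical.
CUTLASS_HOST_DEVICE
bool example_affine2_layout_factory_usage() {
  AffineRank2ColumnMajor::Stride factor;   // stride_factor = (1, 1)
  factor[0] = 1;
  factor[1] = 1;
  AffineRank2ColumnMajor layout =
      Affine2Layout_Factory<AffineRank2ColumnMajor>::layout_factory(make_Coord(4, 8), factor);
  return layout.stride(0) == 1 && layout.stride(1) == 4;  // matches AffineRank2ColumnMajor::packed
}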
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for block-linear matrices. Matrix is structured
/// as column-major arrangement of 2D tiles (that are column-major).
template <int BlockRows, int BlockColumns>
struct ColumnMajorBlockLinear {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
/// Size of a block in rows
static int const kBlockRows = BlockRows;
/// Size of a block in columns
static int const kBlockColumns = BlockColumns;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorBlockLinear(Index ldm = 0): stride_(ldm) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorBlockLinear packed(MatrixCoord const &extent) {
    // Stride between tile columns of a packed tensor: extent.row() rows spanning kBlockColumns columns.
    return ColumnMajorBlockLinear(extent.row() * kBlockColumns);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return
(coord.row() % kBlockRows) +
(coord.column() % kBlockColumns) * kBlockRows +
(coord.row() / kBlockRows) * kBlockRows * kBlockColumns +
(coord.column() / kBlockColumns) * stride_[0];
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
return MatrixCoord(0, 0);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return (extent.column() + kBlockColumns - 1) / kBlockColumns * stride_[0];
}
};
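// Illustrative usage sketch (added for exposition; not part of the original header).
// ColumnMajorBlockLinear<2, 2> stores column-major 2x2 tiles in column-major order of
// tiles; the constructor argument is the stride between tile columns. Hypothetical name.
CUTLASS_HOST_DEVICE
bool example_column_major_block_linear_usage() {
  ColumnMajorBlockLinear<2, 2> layout(8);   // tile columns are 8 elements apart
  // coord (3, 2): (3 % 2) + (2 % 2) * 2 + (3 / 2) * 4 + (2 / 2) * 8 = 1 + 0 + 4 + 8 = 13
  return layout(MatrixCoord(3, 2)) == 13;
}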
/// Mapping function for block-linear matrices. Matrix is structured
/// as row-major arrangement of 2D tiles (that are row-major)
template <int BlockRows, int BlockColumns>
struct RowMajorBlockLinear {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
/// Size of a block in rows
static int const kBlockRows = BlockRows;
/// Size of a block in columns
static int const kBlockColumns = BlockColumns;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorBlockLinear(Index ldm = 0): stride_(ldm) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorBlockLinear packed(MatrixCoord const &extent) {
    // Stride between tile rows of a packed tensor: kBlockRows rows spanning extent.column() columns.
    return RowMajorBlockLinear(extent.column() * kBlockRows);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return
(coord.column() % kBlockColumns) +
(coord.row() % kBlockRows) * kBlockColumns +
(coord.column() / kBlockColumns) * kBlockRows * kBlockColumns +
(coord.row() / kBlockRows) * stride_[0];
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
return MatrixCoord(0, 0);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return (extent.row() + kBlockRows - 1) / kBlockRows * stride_[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct GeneralMatrix {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 2;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index>;
private:
//
// Data members
//
Matrix layout_id_;
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
GeneralMatrix(): layout_id_(Matrix::kColumnMajor), stride_(make_Coord(0, 1)) { }
/// Ctor
CUTLASS_HOST_DEVICE
GeneralMatrix(
Matrix layout_id,
Index ldm,
Index interleave): layout_id_(layout_id), stride_(make_Coord(ldm, interleave)) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static GeneralMatrix packed(
MatrixCoord const &extent,
Matrix layout_id = Matrix::kColumnMajor,
Index interleave = 1) {
Index c;
if (layout_id == Matrix::kRowMajor) {
c = extent.column();
}
else {
c = extent.row();
}
Index ldm = c * interleave;
return GeneralMatrix(layout_id, ldm, interleave);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
Index c, s;
if (layout_id_ == Matrix::kRowMajor) {
c = coord.column();
s = coord.row();
}
else {
s = coord.column();
c = coord.row();
}
Index v = s / stride_[1];
Index residual = (s % stride_[1]);
return LongIndex(c) * LongIndex(stride_[1]) + LongIndex(v) * LongIndex(stride_[0]) + residual;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
CUTLASS_HOST_DEVICE
Matrix layout_id() const {
return layout_id_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
CUTLASS_HOST_DEVICE
Matrix & layout_id() {
return layout_id_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
Index s;
if (layout_id_ == Matrix::kRowMajor) {
s = extent.row();
}
else {
s = extent.column();
}
Index v = Index((s + stride_[1] - 1) / stride_[1]);
return LongIndex(v) * LongIndex(stride_[0]);
}
};
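// Illustrative usage sketch (added for exposition; not part of the original header).
// With interleave = 1, GeneralMatrix reduces to an ordinary row-major or column-major
// layout chosen at runtime; the function name is hypothetical.
CUTLASS_HOST_DEVICE
bool example_general_matrix_usage() {
  GeneralMatrix layout = GeneralMatrix::packed(MatrixCoord(4, 8), Matrix::kRowMajor, 1);  // ldm = 8
  bool ok = (layout(MatrixCoord(2, 3)) == 2 * 8 + 3);   // 19, same as RowMajor
  ok = ok && (layout.capacity(MatrixCoord(4, 8)) == 32);
  return ok;
}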
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines transposes of matrix layouts
template <typename Layout>
struct LayoutTranspose;
/// Transpose of row-major is column-major
template <>
struct LayoutTranspose<layout::RowMajor> {
using type = layout::ColumnMajor;
};
/// Transpose of column-major is row-major
template <>
struct LayoutTranspose<layout::ColumnMajor> {
using type = layout::RowMajor;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace layout
} // namespace cutlass
| include/cutlass/layout/matrix.h/0 | {
"file_path": "include/cutlass/layout/matrix.h",
"repo_id": "include",
"token_count": 11541
} | 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines layout functions used by TensorRef and derived classes for pitch-linear memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template defining a shape used by pitch-linear operators
template <
int Contiguous,
int Strided
>
struct PitchLinearShape {
static int const kContiguous = Contiguous;
static int const kStrided = Strided;
static int const kCount = Contiguous * Strided;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Coordinate in pitch-linear space
struct PitchLinearCoord : public Coord<2, int> {
public:
/// Integer-valued index
using Index = int;
/// Base type is a Coord of rank=2
using Base = Coord<2, Index>;
/// Long integer type
using LongIndex = typename Base::LongIndex;
private:
  /// Contiguous dimension
static int const kContiguous = 0;
  /// Strided dimension
static int const kStrided = 1;
public:
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
PitchLinearCoord() { }
/// Constructs from Coord<2>
CUTLASS_HOST_DEVICE
PitchLinearCoord(Coord<2, Index> const &coord): Base(coord) { }
  /// Helper to construct from a contiguous and strided index
CUTLASS_HOST_DEVICE
PitchLinearCoord(Index contiguous_, Index strided_): Base(make_Coord(contiguous_, strided_)) { }
  /// Helper to construct from a contiguous and strided index based on LongIndex
CUTLASS_HOST_DEVICE
PitchLinearCoord(LongIndex contiguous_, LongIndex strided_)
: Base(make_Coord(Index(contiguous_), Index(strided_))) { }
/// Returns the contiguous dimension
CUTLASS_HOST_DEVICE
Index const & contiguous() const { return this->at(kContiguous); }
/// Returns the contiguous dimension
CUTLASS_HOST_DEVICE
Index & contiguous() { return this->at(kContiguous); }
  /// Returns the strided dimension of the coordinate
CUTLASS_HOST_DEVICE
Index const & strided() const { return this->at(kStrided); }
  /// Returns the strided dimension of the coordinate
CUTLASS_HOST_DEVICE
Index & strided() { return this->at(kStrided); }
//
// Coord operators
//
/// Element-wise addition
CUTLASS_HOST_DEVICE
PitchLinearCoord operator+(Base const& b) const {
return PitchLinearCoord(Base::operator+(b));
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
PitchLinearCoord operator-(Base const& b) const {
return PitchLinearCoord(Base::operator-(b));
}
CUTLASS_HOST_DEVICE
PitchLinearCoord operator-() const {
return PitchLinearCoord(-at(0), -at(1));
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
PitchLinearCoord operator*(Base const& b) const {
return PitchLinearCoord(Base::operator*(b));
}
/// Element-wise division
CUTLASS_HOST_DEVICE
PitchLinearCoord operator/(Base const& b) const {
return PitchLinearCoord(Base::operator/(b));
}
/// In-place addition
CUTLASS_HOST_DEVICE
PitchLinearCoord& operator+=(Base const& b) {
Base::operator+=(b);
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
PitchLinearCoord& operator-=(Base const& b) {
Base::operator-=(b);
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
PitchLinearCoord& operator*=(Base const& b) {
Base::operator*=(b);
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
PitchLinearCoord& operator/=(Base const& b) {
Base::operator/=(b);
return *this;
}
};
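// Illustrative usage sketch (added for exposition; not part of the original header).
// PitchLinearCoord is an ordinary rank-2 Coord whose components are named
// (contiguous, strided); the function name is hypothetical.
CUTLASS_HOST_DEVICE
bool example_pitch_linear_coord_usage() {
  PitchLinearCoord a(4, 2);        // contiguous = 4, strided = 2
  PitchLinearCoord b(1, 3);
  PitchLinearCoord sum = a + b;    // element-wise: (5, 5)
  bool ok = (sum.contiguous() == 5 && sum.strided() == 5);
  sum -= b;                        // back to (4, 2)
  ok = ok && (sum == a);
  return ok;
}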
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| include/cutlass/pitch_linear_coord.h/0 | {
"file_path": "include/cutlass/pitch_linear_coord.h",
"repo_id": "include",
"token_count": 1639
} | 37 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Performs comparison between two elements with support for floating-point comparisons.
*/
#pragma once
#include "numeric_types.h"
#include "complex.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T, typename U = T>
CUTLASS_HOST_DEVICE
bool relatively_equal(T a, T b, U epsilon, U nonzero_floor);
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
// This floating-point comparison function implements the method described in
//
// https://floating-point-gui.de/errors/comparison/
//
template <typename T>
CUTLASS_HOST_DEVICE
bool relatively_equal_float(T a, T b, T epsilon, T nonzero_floor) {
#if defined(__CUDACC_RTC__)
using cuda::std::abs;
#else
using std::abs;
#endif
T abs_A = abs(a);
T abs_B = abs(b);
T diff = abs(a - b);
T zero = T(0);
if (a == b) {
return true;
}
else if (a == zero || b == zero || diff < nonzero_floor) {
return diff < epsilon * nonzero_floor;
}
return diff < epsilon * (abs_A + abs_B);
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<bool>(bool a, bool b, bool, bool) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint1b_t>(uint1b_t a, uint1b_t b, uint1b_t, uint1b_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int2b_t>(int2b_t a, int2b_t b, int2b_t, int2b_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint2b_t>(uint2b_t a, uint2b_t b, uint2b_t, uint2b_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int4b_t>(int4b_t a, int4b_t b, int4b_t, int4b_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint4b_t>(uint4b_t a, uint4b_t b, uint4b_t, uint4b_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int8_t>(int8_t a, int8_t b, int8_t, int8_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint8_t>(uint8_t a, uint8_t b, uint8_t, uint8_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int16_t>(int16_t a, int16_t b, int16_t, int16_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint16_t>(uint16_t a, uint16_t b, uint16_t, uint16_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int32_t>(int32_t a, int32_t b, int32_t, int32_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint32_t>(uint32_t a, uint32_t b, uint32_t, uint32_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int64_t>(int64_t a, int64_t b, int64_t, int64_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint64_t>(uint64_t a, uint64_t b, uint64_t, uint64_t) {
return (a == b);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<float_e4m3_t>(float_e4m3_t a, float_e4m3_t b, float_e4m3_t epsilon, float_e4m3_t nonzero_floor) {
return detail::relatively_equal_float<float>(a, b, epsilon, nonzero_floor);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<float_e5m2_t>(float_e5m2_t a, float_e5m2_t b, float_e5m2_t epsilon, float_e5m2_t nonzero_floor) {
return detail::relatively_equal_float<float>(a, b, epsilon, nonzero_floor);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<half_t>(half_t a, half_t b, half_t epsilon, half_t nonzero_floor) {
return detail::relatively_equal_float(a, b, epsilon, nonzero_floor);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<bfloat16_t>(
bfloat16_t a,
bfloat16_t b,
bfloat16_t epsilon,
bfloat16_t nonzero_floor) {
return detail::relatively_equal_float(a, b, epsilon, nonzero_floor);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<tfloat32_t>(
tfloat32_t a,
tfloat32_t b,
tfloat32_t epsilon,
tfloat32_t nonzero_floor) {
return detail::relatively_equal_float(a, b, epsilon, nonzero_floor);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<float>(float a, float b, float epsilon, float nonzero_floor) {
return detail::relatively_equal_float(a, b, epsilon, nonzero_floor);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<double>(double a, double b, double epsilon, double nonzero_floor) {
return detail::relatively_equal_float(a, b, epsilon, nonzero_floor);
}
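// Illustrative usage sketch (added for exposition; not part of the original header).
// epsilon acts as a relative tolerance and nonzero_floor guards comparisons involving
// values near zero, per detail::relatively_equal_float; the function name is hypothetical.
CUTLASS_HOST_DEVICE
bool example_relatively_equal_usage() {
  float a = 1.000f;
  float b = 1.001f;
  // |a - b| ~= 1e-3 passes a 1% relative tolerance but fails a 0.01% tolerance.
  bool loose = relatively_equal(a, b, 0.01f, 1e-6f);     // true
  bool tight = relatively_equal(a, b, 0.0001f, 1e-6f);   // false
  return loose && !tight;
}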
template<typename T>
CUTLASS_HOST_DEVICE
bool relatively_equal(complex<T> a, complex<T> b, T epsilon, T nonzero_floor) {
#if defined(__CUDACC_RTC__)
using cuda::std::abs;
#else
using std::abs;
#endif
T abs_A = abs(a);
T abs_B = abs(b);
T diff = abs(a - b);
complex<T> zero = complex<T>{T{}, T{}};
if (a == b) {
return true;
}
else if (a == zero || b == zero || diff < nonzero_floor) {
return diff < epsilon * nonzero_floor;
}
return diff < epsilon * (abs_A + abs_B);
}
template <typename T>
CUTLASS_HOST_DEVICE
bool relatively_equal(complex<T> a, complex<T> b, complex<T> epsilon, complex<T> nonzero_floor) {
#if defined(__CUDACC_RTC__)
using cuda::std::abs;
#else
using std::abs;
#endif
T abs_A = abs(a);
T abs_B = abs(b);
complex<T> diff = a - b;
T abs_diff = abs(diff);
complex<T> zero = complex<T>{T{}, T{}};
if (a == b) {
return true;
}
else if (a == zero || b == zero || abs_diff < abs(nonzero_floor)) {
return abs_diff < abs(epsilon * nonzero_floor);
}
return abs_diff < abs(epsilon) * (abs_A + abs_B);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| include/cutlass/relatively_equal.h/0 | {
"file_path": "include/cutlass/relatively_equal.h",
"repo_id": "include",
"token_count": 2814
} | 38 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
namespace cutlass {
namespace transform {
namespace thread {
namespace UnaryTransform {
struct Identity; ///< None (i.e., identity)
struct Conjugate; ///< Complex conjugate
}
/// Element-wise unary operator that transforms one element of a fragment at a time
template<
typename FragmentIn, ///< Input Fragment
typename FragmentOut,///< Output Fragment
typename Transform> ///< Unary transform operator
class UnaryOp
{
public:
CUTLASS_DEVICE
static FragmentOut execute(FragmentIn &in)
{
static_assert(FragmentIn::kElements == FragmentOut::kElements, "Number of elements must match.");
static_assert(platform::is_same<Transform, UnaryTransform::Identity>::value ||
platform::is_same<Transform, UnaryTransform::Conjugate>::value,
"Unary Operator not supported.");
FragmentOut out;
if (platform::is_same<Transform, UnaryTransform::Identity>::value )
{
CUTLASS_PRAGMA_UNROLL
for (int i=0; i < FragmentIn::kElements; ++i){
out[i] = static_cast<typename FragmentOut::Element>(in[i]);
}
}
else if (platform::is_same<Transform, UnaryTransform::Conjugate>::value )
{
for (int i=0; i < FragmentIn::kElements; ++i){
out[i] = conj(static_cast<typename FragmentOut::Element>(in[i]));
}
}
return out;
}
};
template<typename FragmentIn, typename Transform>
class UnaryOp<FragmentIn, FragmentIn, Transform>
{
public:
CUTLASS_DEVICE
static FragmentIn execute(FragmentIn &in)
{
static_assert(platform::is_same<Transform, UnaryTransform::Identity>::value ||
platform::is_same<Transform, UnaryTransform::Conjugate>::value,
"Unary Operator not supported.");
if (platform::is_same<Transform, UnaryTransform::Identity>::value )
{
return in;
}
else if (platform::is_same<Transform, UnaryTransform::Conjugate>::value )
{
for(int i=0; i < FragmentIn::kElements; ++i){
in[i] = conj(in[i]);
}
}
return in;
}
};
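// Illustrative usage sketch (added for exposition; not part of the original header).
// Conjugates a fragment of complex values via the FragmentIn == FragmentOut
// specialization above. Assumes cutlass::Array is available (cutlass/array.h), which
// this header does not include itself; the function name is hypothetical.
CUTLASS_DEVICE
void example_conjugate_fragment(Array<complex<float>, 4> &frag) {
  using Op = UnaryOp<Array<complex<float>, 4>,
                     Array<complex<float>, 4>,
                     UnaryTransform::Conjugate>;
  frag = Op::execute(frag);   // each element is replaced by its complex conjugate
}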
}
}
}
| include/cutlass/transform/thread/unary_op.h/0 | {
"file_path": "include/cutlass/transform/thread/unary_op.h",
"repo_id": "include",
"token_count": 1663
} | 39 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Templates implementing the computation of addresses for storing tiles
      from pitch-linear rank=2 tensors.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::PitchLinear,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Element type per access
using AccessType = Array<Element, ThreadMap::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: stride_(ref.stride(0) / ThreadMap::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_base));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
    // Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided),
    // which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset in the unit of tile.
/// In GEMM/Conv implementation, this is used to move in the k dimension in the shared memory.
/// Below layouts are the shared memory layouts. Current SM50 SIMT kernels only use col major A and row major B.
/// For row major A operand, k dimension is contiguous dimension;
/// For col major A operand, k dimension is strided dimension;
/// For row major B operand, k dimension is strided dimension;
/// For col major B operand, k dimension is contiguous dimension.
/// Below two classes map col/row major to the pitch linear coordinates used
/// in this base class.
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(coord.contiguous() * Shape::kContiguous +
coord.strided() * Shape::kStrided * stride_ *
ThreadMap::kElementsPerAccess);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for column major layouts
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajor,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for row major layouts
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::RowMajor,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
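////////////////////////////////////////////////////////////////////////////////
// Usage sketch (editorial addition, illustrative only). The thread map and tile
// sizes below are assumptions chosen for this example: a 128x32 pitch-linear
// tile of half_t, 256 participating threads, and 8 elements per access.
// AdvanceRank = 1 advances along the strided dimension, which corresponds to
// the k dimension for a column-major A operand as described above.
#include "cutlass/numeric_types.h"                       // for cutlass::half_t
#include "cutlass/transform/pitch_linear_thread_map.h"   // for the example thread map
namespace example {
using ExampleThreadMap = cutlass::transform::PitchLinearWarpRakedThreadMap<
    cutlass::layout::PitchLinearShape<128, 32>,   // tile shape (contiguous, strided)
    256,                                          // participating threads
    cutlass::layout::PitchLinearShape<8, 4>,      // warp thread arrangement
    8>;                                           // elements per access
using ExampleIterator = cutlass::transform::threadblock::RegularTileAccessIterator<
    cutlass::layout::PitchLinearShape<128, 32>,   // Shape
    cutlass::half_t,                              // Element
    cutlass::layout::PitchLinear,                 // Layout
    1,                                            // AdvanceRank: strided dimension
    ExampleThreadMap>;                            // default Alignment
// Moves a thread's iterator forward by one tile along the strided (k) dimension.
CUTLASS_DEVICE
void advance_one_k_tile(ExampleIterator &iter) {
  iter.add_tile_offset({0, 1});   // {contiguous, strided} offset in units of tiles
}
} // namespace example
////////////////////////////////////////////////////////////////////////////////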
| include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h/0 | {
"file_path": "include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h",
"repo_id": "include",
"token_count": 4230
} | 40 |
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: 4a5275a3b68094ba1d8a4b7e4c459321
tags: 645f666f9bcd5a90fca523b33c5a78b7
| python/docs/.buildinfo/0 | {
"file_path": "python/docs/.buildinfo",
"repo_id": "python",
"token_count": 85
} | 41 |
CUTLASS Python API
==================
.. toctree::
:maxdepth: 5
cutlass
| python/docs_src/source/modules.rst/0 | {
"file_path": "python/docs_src/source/modules.rst",
"repo_id": "python",
"token_count": 30
} | 42 |
################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
Unit test for compute node in SM90
"""
import logging
import unittest
import cutlass
from cutlass.backend import *
from cutlass.epilogue import *
from cutlass import swizzle
from utils.evt_testbed import EVTTestBed, EVTTestCaseBase
cutlass.set_log_level(logging.WARNING)
@unittest.skipIf(device_cc() not in [80, 86, 89, 90], "This unittest is only supported on CC [80, 86, 89, 90]")
class TestEVTCompute(EVTTestCaseBase):
def test_arith(self):
"""
        Test Arithmetic op
"""
def evt_arith_compute(accum, C, alpha, beta, gamma):
D = ((accum + C) * alpha - gamma) / beta
return D
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"C": self.fake_tensor(self.element, (l, m, n)),
"alpha": 1.5,
"beta": 0.5,
"gamma": 2.5,
"D": self.fake_tensor(self.element, (l, m, n))
}
launcher = EVTTestBed(self.element, evt_arith_compute, example_inputs)
input_keys = ["C", "alpha", "beta", "gamma"]
result_keys = ["D"]
launcher.verify((m, n, k), input_keys, result_keys, l)
def test_func_call(self):
"""
Test Function call
"""
def evt_func_call(accum, C, alpha, beta, gamma):
D = multiply_add(relu(accum + alpha) + C, beta, gamma)
return D
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"C": self.fake_tensor(self.element, (l, m, n)),
"alpha": 1.5,
"beta": 0.5,
"gamma": 2.5,
"D": self.fake_tensor(self.element, (l, m, n))
}
launcher = EVTTestBed(self.element, evt_func_call, example_inputs)
input_keys = ["C", "alpha", "beta", "gamma"]
result_keys = ["D"]
launcher.verify((m, n, k), input_keys, result_keys, l)
if __name__ == '__main__':
unittest.main()
| test/python/cutlass/evt/evt_compute_sm80_90.py/0 | {
"file_path": "test/python/cutlass/evt/evt_compute_sm80_90.py",
"repo_id": "test",
"token_count": 1571
} | 43 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide Implicit GEMM interface
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h"
#include "cutlass/epilogue/thread/linear_combination_residual_block.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/conv/kernel/default_deconv3d_with_broadcast.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "conv3d_with_broadcast_testbed.h"
#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
TEST(SM80_Device_Deconv3d_With_Broadcast_Optimized_ImplicitGemm_f32ndhwc_f32ndhwc_f32ndhwc_simt_f32,
128x128_32x2_64x64x32) {
/// Conv operation element types for the Gemm equivalent (ImplicitGemm)
using ElementA = float;
using ElementB = float;
using ElementC = float;
using ElementCompute = float;
using ElementAccumulator = float;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasElementwise<
ElementC,
ElementAccumulator,
ElementCompute,
ElementC,
ElementC,
1,
cutlass::epilogue::thread::ReLu<float>
>;
/// Device-level Conv3d instance
using Deconv3dKernel = typename cutlass::conv::kernel::DefaultDeconv3dWithBroadcast<
ElementA, cutlass::layout::TensorNDHWC,
ElementB, cutlass::layout::TensorNDHWC,
ElementC, cutlass::layout::TensorNDHWC,
ElementAccumulator,
cutlass::arch::OpClassSimt,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 8>,
cutlass::gemm::GemmShape<32, 64, 8>,
cutlass::gemm::GemmShape<1, 1, 1>,
EpilogueOutputOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
4,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::IteratorAlgorithm::kOptimized,
cutlass::conv::StrideSupport::kUnity
>::Kernel;
using Deconv3d = cutlass::conv::device::ImplicitGemmConvolution<Deconv3dKernel>;
/// Run all unit test sizes with device-level Conv3d instance
EXPECT_TRUE(test::conv::device::TestAllConv3dWithBroadcast<Deconv3d>());
}
// Test residual block fusion: UnaryOp(BinaryOp(ActivationOp(Conv3d(X) + bias), residual))
// LinearCombinationResidualBlock does not support the split-k mode unless ActivationOp is Identity.
// This is because the activation needs to be applied to the fully accumulated output of the Conv3d op,
// which only the last thread block would have an access to, before applying BinaryOp.
// The epilogue functor in the last thread block would have to be given three inputs, namely
// partial outputs, bias, and residual, but this is not supported in the current interface.
// Set TestSplitK = false to skip split-k tests with non-trivial ActivationOp.
template <
template<typename T> class ActivationOp,
template<typename T> class BinaryOp,
template<typename T> class UnaryOp,
bool TestSplitK = true
>
static void Deconv3dSM80TestResidualBlock() {
using ElementA = float;
using ElementB = float;
using ElementC = float;
using ElementD = ElementC;
using ElementCompute = float;
using ElementAccumulator = float;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationResidualBlock<
ElementD,
ElementAccumulator,
ElementCompute,
ElementC,
1,
ActivationOp,
BinaryOp,
UnaryOp
>;
using Deconv3dKernel = typename cutlass::conv::kernel::DefaultDeconv3dWithBroadcast<
ElementA, cutlass::layout::TensorNDHWC,
ElementB, cutlass::layout::TensorNDHWC,
ElementC, cutlass::layout::TensorNDHWC,
ElementAccumulator,
cutlass::arch::OpClassSimt,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 8>,
cutlass::gemm::GemmShape<32, 64, 8>,
cutlass::gemm::GemmShape<1, 1, 1>,
EpilogueOutputOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
4,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::IteratorAlgorithm::kOptimized,
cutlass::conv::StrideSupport::kUnity
>::Kernel;
using Deconv3d = cutlass::conv::device::ImplicitGemmConvolution<Deconv3dKernel>;
struct ReferenceOp {
using OutputOp = typename Deconv3d::EpilogueOutputOp;
using ElementZ = typename OutputOp::ElementZ;
ActivationOp<ElementCompute> activation;
BinaryOp<ElementCompute> binary_op;
UnaryOp<ElementCompute> unary_op;
void operator()(ElementZ &Z, ElementZ&, ElementCompute conv3d, ElementCompute residual) {
Z = ElementZ(unary_op(binary_op(activation(conv3d), residual)));
}
};
bool passed = test::conv::device::TestAllConv3dWithBroadcast<Deconv3d, ReferenceOp, true, TestSplitK>();
EXPECT_TRUE(passed);
}
TEST(SM80_Device_Deconv3d_With_Residual_Block_Plus_Analytic_ImplicitGemm_f32ndhwc_f32ndhwc_f32ndhwc_simt_f32,
128x128_8x4_32x64x8) {
// Resnet
Deconv3dSM80TestResidualBlock<cutlass::epilogue::thread::Identity, cutlass::plus, cutlass::epilogue::thread::ReLu>();
}
////////////////////////////////////////////////////////////////////////////////
#endif // CUTLASS_ARCH_MMA_SM80_SUPPORTED
////////////////////////////////////////////////////////////////////////////////
| test/unit/conv/device/deconv3d_with_broadcast_simt_sm80.cu/0 | {
"file_path": "test/unit/conv/device/deconv3d_with_broadcast_simt_sm80.cu",
"repo_id": "test",
"token_count": 2329
} | 44 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cutlass/trace.h>
#include <cute/numeric/integral_constant.hpp>
#include <cute/numeric/math.hpp>
#include <cute/util/type_traits.hpp>
// If cute::gcd returns auto instead of common_type_t<T, U>,
// then GCC 7.5 reports the following error:
//
// ... /include/cute/numeric/math.hpp:103:26: error:
// inconsistent deduction for auto return type: ‘int’ and then ‘bool’
// if (u == 0) { return t; }
// ^
// Note that common_type_t<C<42>, C<1>>::value_type might still be bool.
TEST(CuTe_core, gcd_returns_common_type)
{
using cute::C;
constexpr auto fifteen = C<3 * 5>{};
static_assert(cute::is_same_v<decltype(fifteen)::value_type, int>);
static_assert(int(fifteen) == 15);
constexpr auto forty_two = C<2 * 3 * 7>{};
static_assert(cute::is_same_v<decltype(forty_two)::value_type, int>);
static_assert(int(forty_two) == 42);
// C<1>::value_type (as well as C<0>::value_type) may be bool.
constexpr auto one = C<1>{};
// Both inputs have value_type int.
{
constexpr auto result = cute::gcd(fifteen, forty_two);
static_assert(cute::is_same_v<decltype(result)::value_type, int>);
static_assert(int(result) == 3);
}
// One input has value_type int, and the other may have value_type bool.
{
constexpr auto result = cute::gcd(one, forty_two);
static_assert(int(result) == 1);
}
{
constexpr auto result = cute::gcd(forty_two, one);
static_assert(int(result) == 1);
}
// Both inputs may have value_type bool.
{
constexpr auto result = cute::gcd(one, one);
static_assert(int(result) == 1);
}
}
TEST(CuTe_core, lcm_returns_common_type)
{
using cute::C;
constexpr auto six = C<2 * 3>{};
static_assert(cute::is_same_v<decltype(six)::value_type, int>);
static_assert(int(six) == 6);
constexpr auto fifteen = C<3 * 5>{};
static_assert(cute::is_same_v<decltype(fifteen)::value_type, int>);
static_assert(int(fifteen) == 15);
// C<1>::value_type (as well as C<0>::value_type) may be bool.
constexpr auto one = C<1>{};
// Both inputs have value_type int.
{
constexpr auto result = cute::lcm(six, fifteen);
static_assert(cute::is_same_v<decltype(result)::value_type, int>);
static_assert(int(result) == 30);
}
// One input has value_type int, and the other may have value_type bool.
{
constexpr auto result = cute::lcm(one, six);
static_assert(cute::is_same_v<decltype(result)::value_type, int>);
static_assert(int(result) == 6);
}
{
constexpr auto result = cute::lcm(six, one);
static_assert(cute::is_same_v<decltype(result)::value_type, int>);
static_assert(int(result) == 6);
}
// Both inputs may have value_type bool.
{
constexpr auto result = cute::lcm(one, one);
static_assert(int(result) == 1);
}
}
| test/unit/cute/core/math.cpp/0 | {
"file_path": "test/unit/cute/core/math.cpp",
"repo_id": "test",
"token_count": 1595
} | 45 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/kernel/default_gemm_with_broadcast.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h"
#include "cutlass/epilogue/thread/linear_combination_bias_relu.h"
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
#include "testbed_gemm_with_broadcast.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes:
///
/// Z = GEMM+Bias+ReLu
/// T = Relu conditional
///
template <typename Gemm>
struct GemmWithBiasReluReferenceOp {
using OutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp;
using ElementCompute = typename OutputOp::ElementCompute;
using ElementZ = typename OutputOp::ElementZ;
using ElementT = typename OutputOp::ElementT;
typename OutputOp::BinaryOp binary_op;
typename OutputOp::ElementwiseOp elementwise_op;
GemmWithBiasReluReferenceOp() { }
void operator()(ElementZ &Z, ElementT &T, ElementCompute gemm, ElementCompute bias) {
ElementCompute kThreshold = ElementCompute();
ElementCompute z_full = binary_op(gemm, bias);
bool conditional = (z_full >= kThreshold);
if (!conditional) {
z_full = kThreshold;
}
Z = ElementZ(z_full);
T = ElementT(conditional);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Device_GemmWithBroadcast_GELU_f16n_f16n_f16n_tensor_op_f32, 128x128x32_64x64x8) {
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasElementwise<
cutlass::half_t,
float,
float,
cutlass::half_t,
cutlass::half_t,
8,
cutlass::epilogue::thread::GELU_taylor<float>
>;
using GemmKernel =
typename cutlass::gemm::kernel::DefaultGemmWithBroadcast<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand
cutlass::half_t, cutlass::layout::RowMajor,
float,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm75,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<16, 8, 8>,
EpilogueOutputOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
2,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
test::gemm::device::TestAllGemmWithBroadcast<Gemm>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM70_Device_GemmWithBroadcast_GELU_f16n_f16n_f16n_tensor_op_f32, 128x128x32_64x64x8) {
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasElementwise<
cutlass::half_t,
float,
float,
cutlass::half_t,
cutlass::half_t,
8,
cutlass::epilogue::thread::GELU_taylor<float>
>;
using GemmKernel =
typename cutlass::gemm::kernel::DefaultGemmWithBroadcast<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand
cutlass::half_t, cutlass::layout::RowMajor,
float,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm70,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<8, 8, 4>,
EpilogueOutputOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
2,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
test::gemm::device::TestAllGemmWithBroadcast<Gemm>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Device_GemmWithBroadcast_RELU_f16n_f16n_f16n_tensor_op_f32, 128x128x32_64x64x8) {
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasRelu<
cutlass::half_t,
float,
float,
cutlass::half_t,
8,
true
>;
using GemmKernel =
typename cutlass::gemm::kernel::DefaultGemmWithBroadcast<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand
cutlass::half_t, cutlass::layout::RowMajor,
float,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm75,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<16, 8, 8>,
EpilogueOutputOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
2,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
test::gemm::device::TestAllGemmWithBroadcast<Gemm, GemmWithBiasReluReferenceOp<Gemm> >();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM70_Device_GemmWithBroadcast_RELU_f16n_f16n_f16n_tensor_op_f32, 128x128x32_64x64x8) {
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasRelu<
cutlass::half_t,
float,
float,
cutlass::half_t,
8,
true
>;
using GemmKernel =
typename cutlass::gemm::kernel::DefaultGemmWithBroadcast<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand
cutlass::half_t, cutlass::layout::RowMajor,
float,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm70,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<8, 8, 4>,
EpilogueOutputOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
2,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
test::gemm::device::TestAllGemmWithBroadcast<Gemm, GemmWithBiasReluReferenceOp<Gemm> >();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#endif // if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Device_GemmWithBroadcast_GELU_f16n_f16n_f16n_tensor_op_f32, 128x128_32x5_64x64x32_16x8x16) {
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasElementwise<
cutlass::half_t,
float,
float,
cutlass::half_t,
cutlass::half_t,
8,
cutlass::epilogue::thread::GELU_taylor<float>
>;
using GemmKernel =
typename cutlass::gemm::kernel::DefaultGemmWithBroadcast<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand
cutlass::half_t, cutlass::layout::RowMajor,
float,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<16, 8, 16>,
EpilogueOutputOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
5,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
test::gemm::device::TestAllGemmWithBroadcast<Gemm>();
}
TEST(SM80_Device_GemmWithBroadcast_RELU_f16n_f16n_f16n_tensor_op_f32, 128x128_32x5_64x64x32_16x8x16) {
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasRelu<
cutlass::half_t,
float,
float,
cutlass::half_t,
8,
true
>;
using GemmKernel =
typename cutlass::gemm::kernel::DefaultGemmWithBroadcast<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand
cutlass::half_t, cutlass::layout::RowMajor,
float,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<16, 8, 16>,
EpilogueOutputOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
5,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
test::gemm::device::TestAllGemmWithBroadcast<Gemm, GemmWithBiasReluReferenceOp<Gemm>>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Device_GemmWithBroadcast_GELU_f16n_f16n_f16n_tensor_op_f32, 128x128_32x4_64x64x32_16x8x16) {
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasElementwise<
cutlass::half_t,
float,
float,
cutlass::half_t,
cutlass::half_t,
8,
cutlass::epilogue::thread::GELU_taylor<float>
>;
using GemmKernel =
typename cutlass::gemm::kernel::DefaultGemmWithBroadcast<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand
cutlass::half_t, cutlass::layout::RowMajor,
float,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<16, 8, 16>,
EpilogueOutputOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
4,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
test::gemm::device::TestAllGemmWithBroadcast<Gemm>();
}
TEST(SM80_Device_GemmWithBroadcast_RELU_f16n_f16n_f16n_tensor_op_f32, 128x128_32x4_64x64x32_16x8x16) {
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasRelu<
cutlass::half_t,
float,
float,
cutlass::half_t,
8,
true
>;
using GemmKernel =
typename cutlass::gemm::kernel::DefaultGemmWithBroadcast<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand
cutlass::half_t, cutlass::layout::RowMajor,
float,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<16, 8, 16>,
EpilogueOutputOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
4,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
test::gemm::device::TestAllGemmWithBroadcast<Gemm, GemmWithBiasReluReferenceOp<Gemm>>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Device_GemmWithBroadcast_GELU_f16n_f16n_f16n_tensor_op_f32, 128x128_32x3_64x64x32_16x8x16) {
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasElementwise<
cutlass::half_t,
float,
float,
cutlass::half_t,
cutlass::half_t,
8,
cutlass::epilogue::thread::GELU_taylor<float>
>;
using GemmKernel =
typename cutlass::gemm::kernel::DefaultGemmWithBroadcast<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand
cutlass::half_t, cutlass::layout::RowMajor,
float,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<16, 8, 16>,
EpilogueOutputOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
3,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
test::gemm::device::TestAllGemmWithBroadcast<Gemm>();
}
TEST(SM80_Device_GemmWithBroadcast_RELU_f16n_f16n_f16n_tensor_op_f32, 128x128_32x3_64x64x32_16x8x16) {
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasRelu<
cutlass::half_t,
float,
float,
cutlass::half_t,
8,
true
>;
using GemmKernel =
typename cutlass::gemm::kernel::DefaultGemmWithBroadcast<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed B operand
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8, // transposed A operand
cutlass::half_t, cutlass::layout::RowMajor,
float,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<16, 8, 16>,
EpilogueOutputOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
3,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
test::gemm::device::TestAllGemmWithBroadcast<Gemm, GemmWithBiasReluReferenceOp<Gemm> >();
}
TEST(SM80_Device_GemmWithBroadcast_RELU_f32n_f32n_f32n_tensor_op_f32, 64x64_16x10_32x32x16_16x8x8) {
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationBiasRelu<
float,
float,
float,
float,
4,
false
>;
using GemmKernel =
typename cutlass::gemm::kernel::DefaultGemmWithBroadcast<
float, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 4, // transposed B operand
float, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 4, // transposed A operand
float, cutlass::layout::RowMajor,
float,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<16, 8, 8>,
EpilogueOutputOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
10,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
test::gemm::device::TestAllGemmWithBroadcast<Gemm, GemmWithBiasReluReferenceOp<Gemm> >();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/gemm_with_broadcast_f16n_f16n_f16n_tensorop_f32_sm75.cu/0 | {
"file_path": "test/unit/gemm/device/gemm_with_broadcast_f16n_f16n_f16n_tensorop_f32_sm75.cu",
"repo_id": "test",
"token_count": 7000
} | 46 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for Sm90 f16_f16_f16 persistent EVT epilogue
D = row|column|scalar_reduce(alpha * acc + beta * C)
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h"
#include "../../common/cutlass_unit_test.h"
#include "gemm_testbed_3x_evt.hpp"
#include "sm90_evt_operations.hpp"
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
using namespace cute;
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_RowReduce) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombPerColumnReduce<
cutlass::plus, cutlass::atomic_add, float, TileShape_MNK, cutlass::half_t, float, float>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule,
FusionCallbacks
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedPingpong
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
// Host reference
using HostReference = test::gemm::device::HostReduce<Gemm, test::gemm::device::HostRowReduce>;
bool passed = test::gemm::device::TestAllEVT<Gemm, HostReference>(true);
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_ColumnReduce) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombPerRowReduce<
cutlass::plus, cutlass::atomic_add, float, TileShape_MNK, cutlass::half_t, float, float>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule,
FusionCallbacks
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedPingpong
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
// Host reference
using HostReference = test::gemm::device::HostReduce<Gemm, test::gemm::device::HostColumnReduce>;
bool passed = test::gemm::device::TestAllEVT<Gemm, HostReference>(true);
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_persistent_epilogue, 128x128x64_2x2x1_ScalarReduce) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombScalarReduce<
cutlass::plus, cutlass::atomic_add, float, cutlass::half_t, float, float>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule,
FusionCallbacks
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedPingpong
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
// Host reference
using HostReference = test::gemm::device::HostReduce<Gemm, test::gemm::device::HostScalarReduce>;
bool passed = test::gemm::device::TestAllEVT<Gemm, HostReference>(true);
EXPECT_TRUE(passed);
}
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
| test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_reduce.cu/0 | {
"file_path": "test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_pingpong_reduce.cu",
"repo_id": "test",
"token_count": 3110
} | 47 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "../../common/cutlass_unit_test.h"
#include "gemm_testbed_3x.hpp"
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
using namespace cute;
///////////////////////////////////////////////////////////////////////////////
//////////////////////////////// output: E4M3 /////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e5m2 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e5m2t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e5m2_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e4m3 * e5m2 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e5m2n_e4m3n_tensor_op_gmma_f32, 64x128x128) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e5m2_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 2x2x1 //////////////////////////////
///////////////////////////// e4m3 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_2x2x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_2,_2,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_2,_2,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 1x4x1 //////////////////////////////
///////////////////////////// e4m3 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_1x4x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_4,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_4,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 4x1x1 //////////////////////////////
///////////////////////////// e4m3 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_4x1x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_4,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_4,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 2x4x1 //////////////////////////////
///////////////////////////// e4m3 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_2x4x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
//////////////////////////////// output: E5M2 /////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e5m2 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e5m2 = e5m2 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e5m2t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e5m2_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e5m2 = e4m3 * e5m2 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e5m2n_e5m2n_tensor_op_gmma_f32, 64x128x128) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e5m2_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 2x2x1 //////////////////////////////
///////////////////////////// e5m2 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x2x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_2,_2,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_2,_2,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 1x4x1 //////////////////////////////
///////////////////////////// e5m2 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_1x4x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_4,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_4,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 4x1x1 //////////////////////////////
///////////////////////////// e5m2 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_4x1x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_4,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_4,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 2x4x1 //////////////////////////////
///////////////////////////// e5m2 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x4x1) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 2x4x1 //////////////////////////////
///////////////////////////// e5m2 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x4x1_persistent) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::KernelTmaWarpSpecializedPingpong
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Cluster 2x4x1 //////////////////////////////
///////////////////////////// e5m2 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x4x1_non_warpspecialized) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
// Hopper FP8 epilogues with an auxiliary (Aux) tensor are only exercised for CUDA toolkits other than 12.0, i.e. from CUDA 12.1 onward
#if (!((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ == 0)))
///////////////////////////////////////////////////////////////////////////////
///////////////////////// output: E4M3 + Aux Tensor ///////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
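// Note: ScaledLinCombPerRowBiasEltActAmaxAux extends the scaled linear-combination
// fusion used above with an auxiliary tensor output and absolute-maximum (amax)
// tracking, which FP8 pipelines typically use to derive scaling factors. This is an
// informal summary; the fusion operation's definition is the source of truth.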
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_aux_tensor_e4m3) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltActAmaxAux<
LayoutC, cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, cutlass::float_e4m3_t>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
#endif
///////////////////////////////////////////////////////////////////////////////
/////////////////////////////// FP8 Fast Accum ///////////////////////////////
///////////////////////////// e5m2 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
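// Note: the *FP8FastAccum schedules are understood to accumulate entirely in the
// tensor-core accumulators, skipping the periodic promotion of partial sums that the
// default FP8 schedules perform to preserve accuracy, so they trade some numerical
// robustness for throughput. Informal characterization; the schedule tags below are
// authoritative.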
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x4x1_persistent_fp8_fast_accum) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::KernelTmaWarpSpecializedPingpongFP8FastAccum
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e5m2n_tensor_op_gmma_f32, 64x128x128_2x4x1_fp8_fast_accum) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e5m2_t, float, float>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
cutlass::float_e5m2_t, LayoutC, 16 / sizeof(cutlass::float_e5m2_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_2,_4,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::KernelTmaWarpSpecializedFP8FastAccum
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
////////////////////////// output: E4M3 + Bias ///////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_bias_bf16) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Identity, cutlass::float_e4m3_t, float, cutlass::bfloat16_t>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
////////////////////////// output: E4M3 + Bias + Relu ////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e4m3 * e4m3 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_bias_bf16_relu) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltAct<
cutlass::epilogue::thread::ReLu, cutlass::float_e4m3_t, float, cutlass::bfloat16_t>;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
// Hopper FP8 epilogues with an auxiliary (Aux) tensor are only exercised for CUDA toolkits other than 12.0, i.e. from CUDA 12.1 onward
#if (!((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ == 0)))
///////////////////////////////////////////////////////////////////////////////
///////////////////// output: E4M3 + Aux Tensor + Bias/////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e4m3 * e5m2 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e5m2n_e4m3n_tensor_op_gmma_f32, 64x128x128_aux_tensor_f16_bias_f16) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltActAmaxAux<
LayoutC, cutlass::epilogue::thread::Identity,
cutlass::float_e4m3_t, // ElementOutput
float, // ElementCompute
cutlass::half_t, // ElementAux
float, // ElementAmax
cutlass::half_t>; // ElementBias
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e5m2_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////// output: E4M3 + Aux Tensor + Bias + Relu ////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e4m3 * e5m2 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e5m2n_e4m3n_tensor_op_gmma_f32, 64x128x128_aux_tensor_f16_relu) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltActAmaxAux<
LayoutC, cutlass::epilogue::thread::ReLu,
cutlass::float_e4m3_t, // ElementOutput
float, // ElementCompute
cutlass::half_t, // ElementAux
float, // ElementAmax
float>; // ElementBias
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e5m2_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////// e4m3 = e4m3 * e5m2 (TN) /////////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_e4m3t_e5m2n_e4m3n_tensor_op_gmma_f32, 64x128x128_aux_tensor_f16_bias_f16_relu) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionOperation = cutlass::epilogue::fusion::ScaledLinCombPerRowBiasEltActAmaxAux<
LayoutC, cutlass::epilogue::thread::ReLu,
cutlass::float_e4m3_t, // ElementOutput
float, // ElementCompute
cutlass::half_t, // ElementAux
float, // ElementAmax
cutlass::half_t>; // ElementBias
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
cutlass::float_e4m3_t, LayoutC, 16 / sizeof(cutlass::float_e4m3_t),
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e5m2_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>());
}
#endif
///////////////////////////////////////////////////////////////////////////////
//////////////////////////////// TMA epilogue /////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
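// Note: these tests pass no FusionOperation, so the epilogue builder presumably falls
// back to its default plain linear combination (D = alpha * acc + beta * C). They
// exercise the TMA warp-specialized epilogue store path for both column-major and
// row-major D.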
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3n_tensor_op_gmma_f32, 64x128x128_tma_epilogue) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16,
cutlass::float_e4m3_t, LayoutC, 16,
cutlass::epilogue::TmaWarpSpecialized
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::KernelTmaWarpSpecializedPingpong
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
}
TEST(SM90_Device_Gemm_e4m3t_e4m3n_e4m3t_tensor_op_gmma_f32, 64x128x128_tma_epilogue) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using EpilogueOp = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::float_e4m3_t, LayoutC, 16,
cutlass::float_e4m3_t, LayoutC, 16,
cutlass::epilogue::TmaWarpSpecialized
>::CollectiveOp;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
Shape<_64,_128,_128>, Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOp::SharedStorage)>,
cutlass::gemm::KernelTmaWarpSpecializedPingpong
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
}
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
| test/unit/gemm/device/sm90_gemm_f8_f8_f8_tensor_op_fp32.cu/0 | {
"file_path": "test/unit/gemm/device/sm90_gemm_f8_f8_f8_tensor_op_fp32.cu",
"repo_id": "test",
"token_count": 20775
} | 48 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Grouped GEMM testbed for device-wide GEMM interface tests
*/
#pragma once
#include <iostream>
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm_grouped.h"
#include "cutlass/gemm/kernel/default_gemm_grouped.h"
#include "cutlass/gemm/device/gemm_grouped.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/tensor_view_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
struct TestbedGrouped {
//
// Type definitions
//
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using EpilogueOutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp;
using ElementCompute = typename EpilogueOutputOp::ElementCompute;
using LayoutA = typename Gemm::LayoutA;
using LayoutB = typename Gemm::LayoutB;
using LayoutC = typename Gemm::LayoutC;
using MatrixCoord = typename LayoutC::TensorCoord;
//
// Data members
//
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint32_t seed;
int problem_count;
std::vector<cutlass::gemm::GemmCoord> problem_sizes_host;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device;
std::vector<int64_t> offset_A;
std::vector<int64_t> offset_B;
std::vector<int64_t> offset_C;
std::vector<int64_t> offset_D;
std::vector<int64_t> lda_host;
std::vector<int64_t> ldb_host;
std::vector<int64_t> ldc_host;
std::vector<int64_t> ldd_host;
cutlass::DeviceAllocation<int64_t> lda;
cutlass::DeviceAllocation<int64_t> ldb;
cutlass::DeviceAllocation<int64_t> ldc;
cutlass::DeviceAllocation<int64_t> ldd;
cutlass::DeviceAllocation<ElementA> block_A;
cutlass::DeviceAllocation<ElementB> block_B;
cutlass::DeviceAllocation<ElementC> block_C;
cutlass::DeviceAllocation<ElementC> block_D;
cutlass::DeviceAllocation<ElementA *> ptr_A;
cutlass::DeviceAllocation<ElementB *> ptr_B;
cutlass::DeviceAllocation<ElementC *> ptr_C;
cutlass::DeviceAllocation<ElementC *> ptr_D;
//
// Methods
//
TestbedGrouped(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint32_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) {
scope_max = 5;
scope_min = -5;
}
else {
scope_max = 8;
scope_min = -8;
}
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
// no fill - tensor remains zero-initialized
}
return true;
}
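/// Note: the uniform-fill bounds above are deliberately small for low-precision
/// inputs (e.g. [-2, 2] at 8 bits and below) so that products and partial sums stay
/// well within the representable range of the accumulator and output types; the
/// specific bounds are heuristic choices.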
/// Initializes data structures
void initialize() {
//
// Choose random problem sizes
//
// construct a few problems of random sizes
srand(seed);
int64_t total_elements_A = 0;
int64_t total_elements_B = 0;
int64_t total_elements_C = 0;
int64_t total_elements_D = 0;
lda_host.resize(problem_count);
ldb_host.resize(problem_count);
ldc_host.resize(problem_count);
ldd_host.resize(problem_count);
problem_sizes_host.clear();
problem_sizes_host.resize(problem_count);
for (int32_t i = 0; i < problem_count; ++i) {
cutlass::gemm::GemmCoord problem(
8 * (rand() % 64) + 24,
8 * (rand() % 64) + 24,
8 * (rand() % 64) + 24);
if (!i) {
problem = cutlass::gemm::GemmCoord(48, 16, 8);
}
problem_sizes_host.at(i) = problem;
// std::cout << "Problem[" << i << "]: " << problem << std::endl;
lda_host.at(i) = LayoutA::packed({problem.m(), problem.k()}).stride(0);
ldb_host.at(i) = LayoutB::packed({problem.k(), problem.n()}).stride(0);
ldc_host.at(i) = LayoutC::packed({problem.m(), problem.n()}).stride(0);
ldd_host.at(i) = LayoutC::packed({problem.m(), problem.n()}).stride(0);
offset_A.push_back(total_elements_A);
offset_B.push_back(total_elements_B);
offset_C.push_back(total_elements_C);
offset_D.push_back(total_elements_D);
int64_t elements_A = problem.m() * problem.k();
int64_t elements_B = problem.k() * problem.n();
int64_t elements_C = problem.m() * problem.n();
int64_t elements_D = problem.m() * problem.n();
total_elements_A += elements_A;
total_elements_B += elements_B;
total_elements_C += elements_C;
total_elements_D += elements_D;
// Random strides between problems?
}
problem_sizes_device.reset(problem_count);
problem_sizes_device.copy_from_host(problem_sizes_host.data());
lda.reset(problem_count);
ldb.reset(problem_count);
ldc.reset(problem_count);
ldd.reset(problem_count);
lda.copy_from_host(lda_host.data());
ldb.copy_from_host(ldb_host.data());
ldc.copy_from_host(ldc_host.data());
ldd.copy_from_host(ldd_host.data());
//
// Assign pointers
//
block_A.reset(total_elements_A);
block_B.reset(total_elements_B);
block_C.reset(total_elements_C);
block_D.reset(total_elements_D);
std::vector<ElementA *> ptr_A_host(problem_count);
std::vector<ElementB *> ptr_B_host(problem_count);
std::vector<ElementC *> ptr_C_host(problem_count);
std::vector<ElementC *> ptr_D_host(problem_count);
for (int32_t i = 0; i < problem_count; ++i) {
ptr_A_host.at(i) = block_A.get() + offset_A.at(i);
ptr_B_host.at(i) = block_B.get() + offset_B.at(i);
ptr_C_host.at(i) = block_C.get() + offset_C.at(i);
ptr_D_host.at(i) = block_D.get() + offset_D.at(i);
}
ptr_A.reset(problem_count);
ptr_A.copy_from_host(ptr_A_host.data());
ptr_B.reset(problem_count);
ptr_B.copy_from_host(ptr_B_host.data());
ptr_C.reset(problem_count);
ptr_C.copy_from_host(ptr_C_host.data());
ptr_D.reset(problem_count);
ptr_D.copy_from_host(ptr_D_host.data());
//
// Initialize the problems of the workspace
//
for (int32_t i = 0; i < problem_count; ++i) {
cutlass::gemm::GemmCoord problem = problem_sizes_host.at(i);
LayoutA layout_A(lda_host.at(i));
LayoutB layout_B(ldb_host.at(i));
LayoutC layout_C(ldc_host.at(i));
LayoutC layout_D(ldd_host.at(i));
MatrixCoord extent_A{problem.m(), problem.k()};
MatrixCoord extent_B{problem.k(), problem.n()};
MatrixCoord extent_C{problem.m(), problem.n()};
std::vector<ElementA> matrix_A(layout_A.capacity(extent_A));
std::vector<ElementB> matrix_B(layout_B.capacity(extent_B));
std::vector<ElementC> matrix_C(layout_C.capacity(extent_C));
std::vector<ElementC> matrix_D(layout_D.capacity(extent_C));
initialize_tensor(cutlass::TensorView<ElementA, LayoutA>(matrix_A.data(), layout_A, extent_A), init_A, seed * 2021);
initialize_tensor(cutlass::TensorView<ElementB, LayoutB>(matrix_B.data(), layout_B, extent_B), init_B, seed * 2022);
initialize_tensor(cutlass::TensorView<ElementC, LayoutC>(matrix_C.data(), layout_C, extent_C), init_C, seed * 2023);
cutlass::device_memory::copy_to_device(ptr_A_host.at(i), matrix_A.data(), matrix_A.size());
cutlass::device_memory::copy_to_device(ptr_B_host.at(i), matrix_B.data(), matrix_B.size());
cutlass::device_memory::copy_to_device(ptr_C_host.at(i), matrix_C.data(), matrix_C.size());
cutlass::device_memory::copy_to_device(ptr_D_host.at(i), matrix_D.data(), matrix_D.size());
}
}
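/// Memory layout built by initialize(): each operand lives in a single contiguous
/// device allocation (block_A/B/C/D); per-problem sub-tensors are addressed through
/// the offset_* tables, and the kernel receives device-side arrays of raw pointers
/// (ptr_*) and leading dimensions (lda/ldb/ldc/ldd), one entry per problem:
///
///   block_A = [ A_0 | A_1 | ... | A_{N-1} ],  ptr_A[i] = block_A + offset_A[i]
///
/// This matches how grouped GEMM consumes its arguments: one pointer and one leading
/// dimension per problem rather than a single uniform batch stride.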
/// Verifies the result against a host reference GEMM
bool verify(
ElementCompute alpha,
ElementCompute beta) {
bool passed = true;
for (int32_t i = 0; i < problem_count; ++i) {
cutlass::gemm::GemmCoord problem = problem_sizes_host.at(i);
LayoutA layout_A(lda_host.at(i));
LayoutB layout_B(ldb_host.at(i));
LayoutC layout_C(ldc_host.at(i));
LayoutC layout_D(ldd_host.at(i));
MatrixCoord extent_A{problem.m(), problem.k()};
MatrixCoord extent_B{problem.k(), problem.n()};
MatrixCoord extent_C{problem.m(), problem.n()};
std::vector<ElementA> matrix_A(layout_A.capacity(extent_A));
std::vector<ElementB> matrix_B(layout_B.capacity(extent_B));
std::vector<ElementC> matrix_C(layout_C.capacity(extent_C));
std::vector<ElementC> matrix_D(layout_D.capacity(extent_C));
std::vector<ElementC> matrix_Ref(layout_D.capacity(extent_C));
cutlass::device_memory::copy_to_host(matrix_A.data(), block_A.get() + offset_A.at(i), matrix_A.size());
cutlass::device_memory::copy_to_host(matrix_B.data(), block_B.get() + offset_B.at(i), matrix_B.size());
cutlass::device_memory::copy_to_host(matrix_C.data(), block_C.get() + offset_C.at(i), matrix_C.size());
cutlass::device_memory::copy_to_host(matrix_D.data(), block_D.get() + offset_D.at(i), matrix_D.size());
cutlass::TensorView<ElementA, LayoutA> view_A(matrix_A.data(), layout_A, extent_A);
cutlass::TensorView<ElementB, LayoutB> view_B(matrix_B.data(), layout_B, extent_B);
cutlass::TensorView<ElementC, LayoutC> view_C(matrix_C.data(), layout_C, extent_C);
cutlass::TensorView<ElementC, LayoutC> view_D(matrix_D.data(), layout_D, extent_C);
cutlass::TensorView<ElementC, LayoutC> view_Ref(matrix_Ref.data(), layout_D, extent_C);
// Reference GEMM
cutlass::reference::host::GemmComplex<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute, ElementAccumulator
>(
problem,
alpha,
view_A,
Gemm::kTransformA,
view_B,
Gemm::kTransformB,
beta,
view_C,
view_Ref,
ElementAccumulator(0)
);
// Ensure that no input or output is entirely zero
EXPECT_GT(cutlass::reference::host::TensorNorm(view_A), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(view_B), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(view_C), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(view_D), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(view_Ref), 0);
// Compare against reference
passed = cutlass::reference::host::TensorEquals(view_D, view_Ref);
if (!passed) {
std::ofstream file("testbed_grouped_errors.txt");
file
<< "problem: " << problem << " [group: " << i << "]\n"
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << view_A
<< "\nB =\n" << view_B
<< "\nC =\n" << view_C
<< "\n\nReference =\n" << view_Ref
<< "\nComputed =\n" << view_D;
return passed;
}
}
return passed;
}
/// Executes one test
bool run(
int problem_count,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
this->problem_count = problem_count;
// Initialize the problem
initialize();
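    // Determine how many persistent threadblocks the device can sustain for these problems;
    // zero indicates insufficient resources and the test is waived below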
int threadblock_count = Gemm::sufficient(problem_sizes_host.data(), problem_count);
// Early exit
if (!threadblock_count) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device resources." << std::endl;
}
return true;
}
    // Configure the epilogue operation
    typename EpilogueOutputOp::Params epilogue_op(alpha, beta);
    // Configure the GEMM arguments
typename Gemm::Arguments args(
problem_sizes_device.get(),
problem_count,
threadblock_count,
epilogue_op,
ptr_A.get(),
ptr_B.get(),
ptr_C.get(),
ptr_D.get(),
lda.get(),
ldb.get(),
ldc.get(),
ldd.get(),
problem_sizes_host.data()
);
// Initialize the GEMM object
Gemm gemm;
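    // Query the required device workspace, allocate it, and initialize the kernel with the arguments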
size_t workspace_size = gemm.get_workspace_size(args);
cutlass::DeviceAllocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm.initialize(args, workspace.get());
if (status != cutlass::Status::kSuccess) {
return false;
}
// Run the GEMM object
status = gemm.run();
if (status != cutlass::Status::kSuccess) {
return false;
}
// Wait for completion
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess)
<< "Kernel execution error: " << cudaGetErrorString(result);
if (result != cudaSuccess) {
return false;
}
// Verify correctness
return verify(alpha, beta);
}
};
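// Minimal usage sketch (illustrative only; assumes the enclosing testbed struct is named
// TestbedGrouped and that `Gemm` is a grouped GEMM kernel type defined by the including test):
//
//   test::gemm::device::TestbedGrouped<Gemm> testbed;
//   EXPECT_TRUE(testbed.run(/* problem_count = */ 24, ElementCompute(1), ElementCompute(1)));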
/////////////////////////////////////////////////////////////////////////////////////////////////
} // device
} // gemm
} // test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_grouped.h/0 | {
"file_path": "test/unit/gemm/device/testbed_grouped.h",
"repo_id": "test",
"token_count": 6616
} | 49 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for threadblock-level GEMM
*/
#include "mma_multistage_testbed.h"
#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
////////////////////////////////////////////////////////////////////////////////
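// Each test below instantiates cutlass::gemm::threadblock::DefaultMmaCore for one combination of
// threadblock shape, warp shape, instruction shape, and pipeline stage count, then exercises it
// through the multistage testbed. Test names encode these as
// <threadblock MxNxK>_<warp MxNxK>_<instruction MxNxK>_<stages>, with multi-CTA tests also
// prefixing the overall problem size.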
TEST(SM80_gemm_threadblock_congruous, tensor_op_16x128x64_16x32x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(32, 256, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC,
cutlass::arch::OpClassTensorOp, Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous, tensor_op_128x16x64_32x16x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 32, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 16, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC,
cutlass::arch::OpClassTensorOp, Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous, tensor_op_32x128x32_32x32x32_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 256, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC,
cutlass::arch::OpClassTensorOp, Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous, tensor_op_128x32x32_32x32x32_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC,
cutlass::arch::OpClassTensorOp, Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x64x64_64x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x64x64_64x32x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x128x64_32x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x128x64_64x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
multicta_256x256x384_128x128x64_64x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
multicta_512x256x384_256x128x64_64x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x64x32_64x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x64x32_64x32x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x128x32_32x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x128x32_64x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
multicta_256x256x384_128x128x32_64x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
multicta_512x256x768_256x128x32_64x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 768);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
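// The following congruous tests use tfloat32_t operands with the 16x8x8 Tensor Core instruction.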
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x64x32_64x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x64x32_64x32x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x128x32_32x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x128x32_64x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
multicta_256x256x192_128x128x32_64x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 192);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
     multicta_512x256x192_256x128x32_64x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 192);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x64x16_64x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x64x16_64x32x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x128x16_32x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x128x16_64x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
multicta_256x256x192_128x128x16_64x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 192);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
multicta_512x256x384_256x128x16_64x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
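// The "crosswise" tests below swap operand layouts relative to the "congruous" tests above:
// A is RowMajor and B is ColumnMajor, so both operands are contiguous along the K dimension.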
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x64_64x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x64_32x32x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x64_64x32x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x64_32x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x64_64x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x384_128x128x64_64x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x768_256x128x64_64x64x64_16x8x16_3stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 768);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x32_64x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x32_32x32x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x32_64x32x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x32_32x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x32_64x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x384_128x128x32_64x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x768_256x128x32_64x64x32_16x8x16_4stage) {
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 768);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x32_64x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x32_32x32x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x32_64x32x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x32_32x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x32_64x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x192_128x128x32_64x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 192);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x192_256x128x32_64x64x32_16x8x8_3stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 192);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
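// The following tf32 tests use a 4-stage software pipeline with a 16-element
// threadblock K tile instead of the 3-stage / 32-element configurations above.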
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x16_64x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x16_32x32x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x16_64x32x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x16_32x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x16_64x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x192_128x128x16_64x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 192);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x192_256x128x16_64x64x16_16x8x8_4stage) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = float;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 192);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
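// int8 tests: 8-bit integer A/B operands accumulated into int32 using the
// SM80 m16n8k32 Tensor Core MMA instruction.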
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x128_64x64x128_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x128_32x32x128_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x128_64x32x128_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x128_32x64x128_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x128_64x64x128_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x768_128x128x128_64x64x128_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 768);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x768_256x128x128_64x64x128_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 768);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
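// 4-stage int8 variants with a 64-element threadblock K tile.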
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x64_64x64x64_16x8x32_4stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x64_32x32x64_16x8x32_4stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x64_64x32x64_16x8x32_4stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x64_32x64x64_16x8x32_4stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x64_64x64x64_16x8x32_4stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x768_128x128x64_64x64x64_16x8x32_4stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 768);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x768_256x128x64_64x64x64_16x8x32_4stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 768);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
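// int4 tests: 4-bit integer (int4b_t) A/B operands accumulated into int32
// using the SM80 m16n8k64 Tensor Core MMA instruction.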
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x256_64x64x256_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 256>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x256_32x32x256_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 256>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x256_64x32x256_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 256>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x256_32x64x256_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 256>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x256_64x64x256_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 256>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x1536_128x128x256_64x64x256_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 1536);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 256>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x1536_256x128x256_64x64x256_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 1536);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 256>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
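// 4-stage int4 variants with a 128-element threadblock K tile.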
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x128_64x64x128_16x8x64_4stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x128_32x32x128_16x8x64_4stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x128_64x32x128_16x8x64_4stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x128_32x64x128_16x8x64_4stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x128_64x64x128_16x8x64_4stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 1024);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x1536_128x128x128_64x64x128_16x8x64_4stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 1536);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x1536_256x128x128_64x64x128_16x8x64_4stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 1536);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
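// Binary tests: 1-bit (uint1b_t) A/B operands accumulated into int32 using
// the SM80 m16n8k256 Tensor Core MMA instruction.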
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x1024_64x64x1024_16x8x256_3stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 1024>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x1024_32x32x1024_16x8x256_3stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 1024>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x1024_64x32x1024_16x8x256_3stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 1024>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x1024_32x64x1024_16x8x256_3stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 1024>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x1024_64x64x1024_16x8x256_3stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 1024>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x6144_128x128x1024_64x64x1024_16x8x256_3stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 6144);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 1024>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x6144_256x128x1024_64x64x1024_16x8x256_3stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 6144);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 1024>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
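// 4-stage binary variants with a 512-element threadblock K tile.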
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x512_64x64x512_16x8x256_4stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 512>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x64x512_32x32x512_16x8x256_4stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 512>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x64x512_64x32x512_16x8x256_4stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 512>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_64x128x512_32x64x512_16x8x256_4stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 512>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
tensor_op_128x128x512_64x64x512_16x8x256_4stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 4096);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 512>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_256x256x6144_128x128x512_64x64x512_16x8x256_4stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 6144);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 512>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise,
multicta_512x256x6144_256x128x512_64x64x512_16x8x256_4stage) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 6144);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 512>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_64x64x16_32x64x16_8x8x4_3stage) {
using ElementA = double;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = double;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = double;
using LayoutC = cutlass::layout::RowMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 16);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 2, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k())
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_congruous,
tensor_op_128x128x16_32x64x16_8x8x4_3stage) {
using ElementA = double;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = double;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = double;
using LayoutC = cutlass::layout::RowMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 64);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k())
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_interleaved,
tensor_op_64x128x64_32x64x64_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>;
using ElementB = int8_t;
using LayoutB = cutlass::layout::RowMajorInterleaved<32>;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_interleaved,
tensor_op_128x128x64_64x64x64_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>;
using ElementB = int8_t;
using LayoutB = cutlass::layout::RowMajorInterleaved<32>;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 256);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_interleaved,
multicta_256x256x384_128x128x64_64x64x64_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>;
using ElementB = int8_t;
using LayoutB = cutlass::layout::RowMajorInterleaved<32>;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_interleaved,
multicta_512x256x384_256x128x64_64x64x64_16x8x32_3stage) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>;
using ElementB = int8_t;
using LayoutB = cutlass::layout::RowMajorInterleaved<32>;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 384);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_interleaved,
tensor_op_64x128x128_32x64x128_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::RowMajorInterleaved<64>;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_interleaved,
tensor_op_128x128x128_64x64x128_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::RowMajorInterleaved<64>;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 512);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_interleaved,
multicta_256x256x768_128x128x128_64x64x128_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::RowMajorInterleaved<64>;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(256, 256, 768);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_interleaved,
multicta_512x256x1536_256x128x128_64x64x128_16x8x64_3stage) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::RowMajorInterleaved<64>;
using ElementC = int;
using LayoutC = cutlass::layout::ColumnMajor;
cutlass::gemm::GemmCoord problem_size(512, 256, 1536);
using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
float alpha = 1.f;
float beta = 0.0f;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(2, 2);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_gemm_threadblock_crosswise_f64,
tensor_op_32x32x16_16x16x16_8x8x4_4stage) {
using ElementA = double;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = double;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = double;
using LayoutC = cutlass::layout::RowMajor;
cutlass::gemm::GemmCoord problem_size(32, 32, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k())
.run(grid, block);
}
TEST(SM80_gemm_threadblock_crosswise_f64,
tensor_op_64x64x16_32x32x16_8x8x4_4stage) {
using ElementA = double;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = double;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = double;
using LayoutC = cutlass::layout::RowMajor;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k())
.run(grid, block);
}
TEST(SM80_gemm_threadblock_crosswise_f64,
tensor_op_64x128x16_32x64x16_8x8x4_4stage) {
using ElementA = double;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = double;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = double;
using LayoutC = cutlass::layout::RowMajor;
cutlass::gemm::GemmCoord problem_size(64, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k())
.run(grid, block);
}
TEST(SM80_gemm_threadblock_crosswise_f64,
tensor_op_128x64x16_64x32x16_8x8x4_4stage) {
using ElementA = double;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = double;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = double;
using LayoutC = cutlass::layout::RowMajor;
cutlass::gemm::GemmCoord problem_size(128, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
int const Stages = 4;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 4, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k())
.run(grid, block);
}
TEST(SM80_gemm_threadblock_crosswise_f64,
tensor_op_128x128x16_32x64x16_8x8x4_3stage) {
using ElementA = double;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = double;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = double;
using LayoutC = cutlass::layout::RowMajor;
cutlass::gemm::GemmCoord problem_size(128, 128, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
int const Stages = 3;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp,
Stages>;
dim3 grid(1, 1);
dim3 block(32, 8, 1);
test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
problem_size.k())
.run(grid, block);
}
////////////////////////////////////////////////////////////////////////////////
#endif
| test/unit/gemm/threadblock/mma_multistage.cu/0 | {
"file_path": "test/unit/gemm/threadblock/mma_multistage.cu",
"repo_id": "test",
"token_count": 53626
} | 50 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Unit testbed for threadblock-level planar complex GEMM
*/
#pragma once
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/platform/platform.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/vector.h"
#include "cutlass/numeric_types.h"
#include "cutlass/core_io.h"
#include "cutlass/util/host_tensor_planar_complex.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/gemm_planar_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_fill.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Mma>
__global__ void kernel_mma_planar_complex(
cutlass::gemm::GemmCoord problem_size,
typename Mma::IteratorA::Params params_A,
typename Mma::IteratorA::Element *ptr_A,
int64_t imaginary_stride_A,
typename Mma::IteratorB::Params params_B,
typename Mma::IteratorB::Element *ptr_B,
int64_t imaginary_stride_B,
typename Mma::ElementC *ptr_C,
typename Mma::LayoutC::Stride::Index ldc, int64_t imaginary_stride_C) {
// Shared storage needed by threadblock-scoped matrix multiply-accumulate
__shared__ typename Mma::SharedStorage shared_storage;
// Compute threadblock location
cutlass::gemm::GemmCoord tb_tile_offset = {int(blockIdx.x), int(blockIdx.y),
0};
cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * Mma::Shape::kM,
tb_tile_offset.k()};
cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(),
tb_tile_offset.n() * Mma::Shape::kN};
// Compute position within threadblock
int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x;
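  // Planar complex operands are stored as two contiguous planes: the real plane at the
  // base pointer and the imaginary plane at (base pointer + imaginary_stride). Separate
  // iterators are constructed over each plane below.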
// Construct iterators to A operand
typename Mma::IteratorA iterator_A_real(params_A, ptr_A,
{problem_size.m(), problem_size.k()},
tb_thread_id, tb_offset_A);
typename Mma::IteratorA iterator_A_imag(params_A, ptr_A + imaginary_stride_A,
{problem_size.m(), problem_size.k()},
tb_thread_id, tb_offset_A);
// Construct iterators to B operand
typename Mma::IteratorB iterator_B_real(params_B, ptr_B,
{problem_size.k(), problem_size.n()},
tb_thread_id, tb_offset_B);
typename Mma::IteratorB iterator_B_imag(params_B, ptr_B + imaginary_stride_B,
{problem_size.k(), problem_size.n()},
tb_thread_id, tb_offset_B);
int warp_id = threadIdx.y;
int lane_id = threadIdx.x;
// Construct thread-scoped matrix multiply
Mma mma(shared_storage, tb_thread_id, warp_id, threadIdx.x);
typename Mma::FragmentC accum;
accum.clear();
int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accum, iterator_A_real, iterator_A_imag, iterator_B_real, iterator_B_imag, accum);
// Output results
typename Mma::Operator::IteratorC iterator_C({ptr_C, ldc}, lane_id);
iterator_C.add_tile_offset(
{(tb_tile_offset.m() * Mma::WarpCount::kM) +
(warp_id % Mma::WarpCount::kM),
(tb_tile_offset.n() * Mma::WarpCount::kN) +
(warp_id / Mma::WarpCount::kM)});
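  // The accumulator fragment keeps real and imaginary parts separately; the real part is
  // written at the base pointer and the imaginary part at an offset of imaginary_stride_C.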
iterator_C.store(accum.real);
iterator_C.store_with_pointer_offset(accum.imag, imaginary_stride_C);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Threadblock-level matrix multiply-accumulate
typename Mma_>
struct TestbedPlanarComplex {
using Mma = Mma_;
using ThreadblockShape = typename Mma::Shape;
using IteratorA = typename Mma::IteratorA;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using IteratorB = typename Mma::IteratorB;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Mma::ElementC;
using ElementAccumulator = typename Mma::ElementC;
using LayoutC = typename Mma::LayoutC;
using ThreadMapA = typename Mma::IteratorA::ThreadMap;
using ThreadMapB = typename Mma::IteratorB::ThreadMap;
using AccessTypeA = cutlass::Array<ElementA, ThreadMapA::kElementsPerAccess>;
using AccessTypeB = cutlass::Array<ElementB, ThreadMapB::kElementsPerAccess>;
static int const Stages = Mma::kStages;
static cutlass::arch::CacheOperation::Kind const CacheOpA =
Mma::kCacheOpA;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
Mma::kCacheOpB;
//
// Data members
//
cutlass::HostTensorPlanarComplex<ElementA, LayoutA> matrix_A;
cutlass::HostTensorPlanarComplex<ElementB, LayoutB> matrix_B;
cutlass::HostTensorPlanarComplex<ElementC, LayoutC> matrix_C_computed;
cutlass::HostTensorPlanarComplex<ElementC, LayoutC> matrix_C_reference;
cutlass::gemm::GemmCoord problem_size;
//
// Methods
//
/// Allocates workspace in device memory
TestbedPlanarComplex(int m, int n, int k)
: problem_size(m, n, k) {
matrix_A.reset(cutlass::make_Coord(m, k));
matrix_B.reset(cutlass::make_Coord(k, n));
matrix_C_computed.reset(cutlass::make_Coord(m, n));
matrix_C_reference.reset(cutlass::make_Coord(m, n), false);
}
/// Runs the test
bool run(
dim3 grid, dim3 block,
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) {
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementA>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementA>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
matrix_A.host_view(), seed, scope_max, scope_min, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
for (int i = 0; i < matrix_A.capacity() * 2; ++i) {
matrix_A.host_data()[i] = cutlass::half_t(float(i % 5) - 2);
}
/*
cutlass::reference::host::BlockFillSequential(matrix_A.host_data(),
matrix_A.capacity() * 2);
*/
} else if (init_A == cutlass::Distribution::Identity) {
//cutlass::reference::host::TensorFillIdentity(matrix_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementB>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementB>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
matrix_B.host_view(), seed + 16, scope_max, scope_min, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(matrix_B.host_data(),
matrix_B.capacity() * 2);
for (int i = 0; i < matrix_B.capacity() * 2; ++i) {
matrix_B.host_data()[i] = cutlass::half_t(float((i + 3) % 5) - 2);
}
} else if (init_B == cutlass::Distribution::Identity) {
//cutlass::reference::host::TensorFillIdentity(matrix_B.host_view());
} else {
return false;
}
matrix_A.sync_device();
matrix_B.sync_device();
matrix_C_computed.sync_device();
typename IteratorA::Params params_A(matrix_A.layout());
typename IteratorB::Params params_B(matrix_B.layout());
test::gemm::threadblock::kernel_mma_planar_complex<Mma><<<grid, block>>>(
problem_size,
params_A,
matrix_A.device_data(),
matrix_A.imaginary_stride(),
params_B,
matrix_B.device_data(),
matrix_B.imaginary_stride(),
matrix_C_computed.device_data(),
matrix_C_computed.layout().stride(0),
matrix_C_computed.imaginary_stride()
);
//
// Check error code
//
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess)
<< " kernel error: " << cudaGetErrorString(result);
matrix_C_computed.sync_host();
cutlass::reference::host::GemmPlanarComplex<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator
>(
problem_size,
cutlass::complex<ElementAccumulator>(ElementAccumulator(1)),
matrix_A.host_ref(),
Mma::kTransformA,
matrix_B.host_ref(),
Mma::kTransformB,
cutlass::complex<ElementAccumulator>(ElementAccumulator(0)),
matrix_C_reference.host_ref(),
matrix_C_reference.host_ref()
);
bool passed = cutlass::reference::host::TensorEquals(
matrix_C_computed.host_view(),
matrix_C_reference.host_view()
);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("mma_pipelined_testbed_errors.txt");
output
<< "A:\n" << matrix_A.host_view() << "\n"
<< "B:\n" << matrix_B.host_view() << "\n"
<< "Reference:\n"
<< matrix_C_reference.host_view() << "\n"
<< "Computed:\n"
<< matrix_C_computed.host_view() << "\n";
}
return passed;
}
};
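// Example usage (illustrative sketch only; "Mma" stands in for a threadblock-scoped
// planar complex multiply-accumulate type supplied by the including test):
//
//   dim3 grid(1, 1);
//   dim3 block(32, 4, 1);   // 32 threads per warp, block.y = warps per threadblock
//   test::gemm::threadblock::TestbedPlanarComplex<Mma>(64, 64, 32).run(grid, block);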
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace test
| test/unit/gemm/threadblock/mma_planar_complex_testbed.h/0 | {
"file_path": "test/unit/gemm/threadblock/mma_planar_complex_testbed.h",
"repo_id": "test",
"token_count": 4837
} | 51 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Unit testbeds for warp-level GEMM
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_types.h"
#include "cutlass/subbyte_reference.h"
#include "cutlass/platform/platform.h"
#include "cutlass/arch/arch.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/host_uncompress.h"
namespace test {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test kernel: stages the A and B operands into shared memory and exercises a
/// warp-level MMA operator
template <typename Mma, typename ThreadblockShape>
__global__ void kernel(
typename Mma::ElementC *output_C,
typename Mma::ElementA const *input_A,
typename Mma::ElementB const *input_B,
typename Mma::ElementC const *input_C,
int iterations = 1) {
// Use AlignedBuffer to store trivially copyable objects in unions and __shared__ buffers.
__shared__ cutlass::AlignedBuffer<
typename Mma::ElementA, ThreadblockShape::kM * ThreadblockShape::kK> smem_buffer_A;
__shared__ cutlass::AlignedBuffer<
typename Mma::ElementB, ThreadblockShape::kN * ThreadblockShape::kK> smem_buffer_B;
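  // A single thread stages the A and B operands from global memory into shared memory.
  // ReferenceFactory is used so that sub-byte element types (e.g. int4b_t, uint1b_t)
  // are addressed correctly through packed references.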
if (threadIdx.x == 0) {
typename Mma::ElementA *smem_ptr_A = smem_buffer_A.data();
#pragma unroll 1
for (size_t i = 0; i < smem_buffer_A.size(); ++i) {
cutlass::ReferenceFactory<typename Mma::ElementA>::get(smem_ptr_A, i) =
cutlass::ReferenceFactory<typename cutlass::platform::remove_const<
typename Mma::ElementA>::type>::get(input_A, i);
}
typename Mma::ElementB *smem_ptr_B = smem_buffer_B.data();
#pragma unroll 1
for (size_t i = 0; i < smem_buffer_B.size(); ++i) {
cutlass::ReferenceFactory<typename Mma::ElementB>::get(smem_ptr_B, i) =
cutlass::ReferenceFactory<typename cutlass::platform::remove_const<
typename Mma::ElementB>::type>::get(input_B, i);
}
}
__syncthreads();
//
// Construct warp-level matrix product
//
using FragmentA = typename Mma::FragmentA;
using FragmentB = typename Mma::FragmentB;
using FragmentC = typename Mma::FragmentC;
typename Mma::LayoutA layout_A = Mma::LayoutA::packed({ThreadblockShape::kM, ThreadblockShape::kK});
typename Mma::LayoutB layout_B = Mma::LayoutB::packed({ThreadblockShape::kK, ThreadblockShape::kN});
typename Mma::LayoutC layout_C = Mma::LayoutC::packed({Mma::Shape::kM, Mma::Shape::kN});
typename Mma::IteratorA iter_A({smem_buffer_A.data(), layout_A}, cutlass::arch::LaneId());
typename Mma::IteratorB iter_B({smem_buffer_B.data(), layout_B}, cutlass::arch::LaneId());
FragmentA frag_A;
FragmentB frag_B;
FragmentC accum;
Mma mma;
accum.clear();
CUTLASS_PRAGMA_NO_UNROLL
for (int iter = 0; iter < iterations; ++iter) { // place in loop that is not unrolled
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < ThreadblockShape::kK;
k += Mma::Policy::MmaShape::kK) {
iter_A.load(frag_A);
iter_B.load(frag_B);
++iter_A;
++iter_B;
mma(accum, frag_A, frag_B, accum);
}
}
typename Mma::IteratorC iter_C({output_C, layout_C}, cutlass::arch::LaneId());
iter_C.store(accum);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Warp-level matrix multiply-accumulate
typename Mma_,
/// Size of threadblock-scoped shape used to store SMEM
typename ThreadblockShape_,
/// The inner product operation performed by GEMM
typename Operator_ = cutlass::arch::OpMultiplyAdd
>
struct Testbed {
/// Thread-level matrix multiply-accumulate operator
using Mma = Mma_;
using ThreadblockShape = ThreadblockShape_;
using Operator = Operator_;
using Shape = typename Mma::Shape;
using ElementA = typename Mma::ElementA;
using LayoutA = typename Mma::LayoutA;
using ElementB = typename Mma::ElementB;
using LayoutB = typename Mma::LayoutB;
using ElementC = typename Mma::ElementC;
using LayoutC = typename Mma::LayoutC;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
//
// Methods
//
/// Allocates workspace in device memory
Testbed() {
tensor_A.reset(cutlass::make_Coord(ThreadblockShape::kM, ThreadblockShape::kK));
tensor_B.reset(cutlass::make_Coord(ThreadblockShape::kK, ThreadblockShape::kN));
tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.major == 9) {
// NVIDIA Hopper drops support for several data types
if (
cutlass::sizeof_bits<ElementA>::value < 8 ||
cutlass::sizeof_bits<ElementB>::value < 8 ||
cutlass::sizeof_bits<ElementC>::value < 8) {
return false;
}
}
return true;
}
/// Runs the test
bool run(
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) {
if (!sufficient()) {
return true;
}
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementA>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementA>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::BlockFillRandomUniform(tensor_A.host_data(),
tensor_A.capacity(), seed, scope_max, scope_min, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_A.host_data(),
tensor_A.capacity());
} else if (init_A == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementB>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementB>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::BlockFillRandomUniform(tensor_B.host_data(),
tensor_B.capacity(), seed, scope_max, scope_min, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_B.host_data(),
tensor_B.capacity());
} else if (init_B == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_B.host_view());
} else {
return false;
}
cutlass::reference::host::TensorFill(
tensor_C.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_computed.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_reference.host_view(),
ElementC(0)
);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D_computed.sync_device();
// launch kernel
kernel<Mma, ThreadblockShape><<< dim3(1, 1), dim3(32, 1, 1) >>>(
tensor_D_computed.device_data(),
tensor_A.device_data(),
tensor_B.device_data(),
tensor_C.device_data());
// verify no errors
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result);
if (result != cudaSuccess) {
return false;
}
tensor_D_computed.sync_host();
//
// Reference implementation
//
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, ElementC, ElementC,
Operator>
reference_gemm;
reference_gemm(
{Shape::kM, Shape::kN, ThreadblockShape::kK},
ElementC(1),
tensor_A.host_ref(),
tensor_B.host_ref(),
ElementC(0),
tensor_D_reference.host_ref()
);
//
// Verify equivalence
//
// compare
bool passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view()
);
EXPECT_TRUE(passed);
if (!passed) {
cutlass::TensorView<ElementA, cutlass::layout::ColumnMajor> tensor_A_physical(
tensor_A.host_data(),
tensor_A.stride()[0],
tensor_A.extent());
cutlass::TensorView<ElementB, cutlass::layout::RowMajor> tensor_B_physical(
tensor_B.host_data(),
tensor_B.stride()[0],
tensor_B.extent());
std::cout <<"cutlass::sizeof_bits<ElementA>::value = "<<cutlass::sizeof_bits<ElementA>::value<<"\n";
std::cout
<< "A:\n" << tensor_A.host_view() << "\n\n"
<< "A(physical - stride: " << tensor_A.stride()[0]
<< ", extent: " << tensor_A.extent() << "):\n" << tensor_A_physical << "\n\n";
std::cout <<"cutlass::sizeof_bits<ElementB>::value = "<<cutlass::sizeof_bits<ElementB>::value<<"\n";
std::cout
<< "B:\n" << tensor_B.host_view() << "\n\n"
<< "B(physical - stride: " << tensor_B.stride()[0]
<< ", extent: " << tensor_B.extent() << "):\n" << tensor_B_physical << "\n\n";
std::cout
<< "C:\n" << tensor_C.host_view() << "\n\n"
<< "Reference:\n" << tensor_D_reference.host_view() << "\n\n"
<< "Computed:\n" << tensor_D_computed.host_view() << std::endl;
}
return passed;
}
};
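// Example usage (illustrative sketch only; "MmaTensorOp" stands in for a warp-level
// operator, e.g. one composed with cutlass::gemm::warp::DefaultMmaTensorOp):
//
//   using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
//   test::gemm::warp::Testbed<MmaTensorOp, ThreadblockShape>().run();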
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Warp-level matrix multiply-accumulate
typename Mma_,
/// Size of threadblock-scoped shape used to store SMEM
typename ThreadblockShape_
>
struct TestbedComplex {
/// Thread-level matrix multiply-accumulate operator
using Mma = Mma_;
using ThreadblockShape = ThreadblockShape_;
using Shape = typename Mma::Shape;
using ElementA = typename Mma::ElementA;
using LayoutA = typename Mma::LayoutA;
using ElementB = typename Mma::ElementB;
using LayoutB = typename Mma::LayoutB;
using ElementC = typename Mma::ElementC;
using LayoutC = typename Mma::LayoutC;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
//
// Methods
//
/// Allocates workspace in device memory
TestbedComplex() {
tensor_A.reset(cutlass::make_Coord(ThreadblockShape::kM, ThreadblockShape::kK));
tensor_B.reset(cutlass::make_Coord(ThreadblockShape::kK, ThreadblockShape::kN));
tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.major == 9) {
// NVIDIA Hopper drops support for several data types
if (
cutlass::sizeof_bits<ElementA>::value < 8 ||
cutlass::sizeof_bits<ElementB>::value < 8 ||
cutlass::sizeof_bits<ElementC>::value < 8) {
return false;
}
}
return true;
}
/// Runs the test
bool run(
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) {
if (!sufficient()) {
return true;
}
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(tensor_A.host_view(),
seed, 8, -8, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_A.host_data(),
tensor_A.capacity());
} else if (init_A == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(tensor_B.host_view(),
seed + 16, 8, -8, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_B.host_data(),
tensor_B.capacity());
} else if (init_B == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_B.host_view());
} else {
return false;
}
cutlass::reference::host::TensorFill(
tensor_C.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_computed.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_reference.host_view(),
ElementC(0)
);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D_computed.sync_device();
// launch kernel
kernel<Mma, ThreadblockShape><<< dim3(1, 1), dim3(32, 1, 1) >>>(
tensor_D_computed.device_data(),
tensor_A.device_data(),
tensor_B.device_data(),
tensor_C.device_data());
// verify no errors
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result);
if (result != cudaSuccess) {
return false;
}
tensor_D_computed.sync_host();
//
// Reference implementation
//
cutlass::reference::host::GemmComplex(
{Shape::kM, Shape::kN, ThreadblockShape::kK},
ElementC(1),
tensor_A.host_ref(),
Mma::kTransformA,
tensor_B.host_ref(),
Mma::kTransformB,
ElementC(0),
tensor_C.host_ref(),
tensor_D_reference.host_ref()
);
//
// Verify equivalence
//
// compare
bool passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view()
);
EXPECT_TRUE(passed);
if (!passed) {
cutlass::TensorView<ElementA, cutlass::layout::ColumnMajor> tensor_A_physical(
tensor_A.host_data(),
tensor_A.stride()[0],
tensor_A.extent());
cutlass::TensorView<ElementB, cutlass::layout::RowMajor> tensor_B_physical(
tensor_B.host_data(),
tensor_B.stride()[0],
tensor_B.extent());
std::cout <<"cutlass::sizeof_bits<ElementA>::value = "<<cutlass::sizeof_bits<ElementA>::value<<"\n";
std::cout
<< "A:\n" << tensor_A.host_view() << "\n\n"
<< "A(physical - stride: " << tensor_A.stride()[0] << ", extent: " << tensor_A.extent() << "):\n" << tensor_A_physical << "\n\n";
std::cout <<"cutlass::sizeof_bits<ElementB>::value = "<<cutlass::sizeof_bits<ElementB>::value<<"\n";
std::cout
<< "B:\n" << tensor_B.host_view() << "\n\n"
<< "B(physical - stride: " << tensor_B.stride()[0] << ", extent: " << tensor_B.extent() <<"):\n" << tensor_B_physical << "\n\n";
std::cout
<< "C:\n" << tensor_C.host_view() << "\n\n"
<< "Reference:\n" << tensor_D_reference.host_view() << "\n\n"
<< "Computed:\n" << tensor_D_computed.host_view() << std::endl;
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test kernel for warp-level MMA operators that require an explicit operand transform
/// (Mma::transform) before the multiply-accumulate
template <typename Mma, typename ThreadblockShape>
__global__ void kernel_transform(
typename Mma::ElementC *output_C,
typename Mma::ElementA const *input_A,
typename Mma::ElementB const *input_B,
typename Mma::ElementC const *input_C,
int iterations = 1) {
// Use AlignedBuffer to store trivially copyable objects in unions and __shared__ buffers.
__shared__ cutlass::AlignedBuffer<
typename Mma::ElementA, ThreadblockShape::kM * ThreadblockShape::kK> smem_buffer_A;
__shared__ cutlass::AlignedBuffer<
typename Mma::ElementB, ThreadblockShape::kN * ThreadblockShape::kK> smem_buffer_B;
if (threadIdx.x == 0) {
typename Mma::ElementA *smem_ptr_A = smem_buffer_A.data();
#pragma unroll 1
for (size_t i = 0; i < smem_buffer_A.size(); ++i) {
cutlass::ReferenceFactory<typename Mma::ElementA>::get(smem_ptr_A, i) =
cutlass::ReferenceFactory<typename cutlass::platform::remove_const<
typename Mma::ElementA>::type>::get(input_A, i);
}
typename Mma::ElementB *smem_ptr_B = smem_buffer_B.data();
#pragma unroll 1
for (size_t i = 0; i < smem_buffer_B.size(); ++i) {
cutlass::ReferenceFactory<typename Mma::ElementB>::get(smem_ptr_B, i) =
cutlass::ReferenceFactory<typename cutlass::platform::remove_const<
typename Mma::ElementB>::type>::get(input_B, i);
}
}
__syncthreads();
//
// Construct warp-level matrix product
//
using FragmentA = typename Mma::FragmentA;
using FragmentB = typename Mma::FragmentB;
using FragmentC = typename Mma::FragmentC;
using TransformedFragmentA = typename Mma::TransformedFragmentA;
using TransformedFragmentB = typename Mma::TransformedFragmentB;
typename Mma::LayoutA layout_A = Mma::LayoutA::packed({ThreadblockShape::kM, ThreadblockShape::kK});
typename Mma::LayoutB layout_B = Mma::LayoutB::packed({ThreadblockShape::kK, ThreadblockShape::kN});
typename Mma::LayoutC layout_C = Mma::LayoutC::packed({Mma::Shape::kM, Mma::Shape::kN});
typename Mma::IteratorA iter_A({smem_buffer_A.data(), layout_A}, cutlass::arch::LaneId());
typename Mma::IteratorB iter_B({smem_buffer_B.data(), layout_B}, cutlass::arch::LaneId());
FragmentA loaded_frag_A;
FragmentB loaded_frag_B;
TransformedFragmentA transformed_frag_A;
TransformedFragmentB transformed_frag_B;
FragmentC accum;
Mma mma;
accum.clear();
CUTLASS_PRAGMA_NO_UNROLL
for (int iter = 0; iter < iterations; ++iter) { // place in loop that is not unrolled
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < ThreadblockShape::kK;
k += Mma::Policy::MmaShape::kK) {
iter_A.load(loaded_frag_A);
iter_B.load(loaded_frag_B);
++iter_A;
++iter_B;
mma.transform(transformed_frag_A, transformed_frag_B, loaded_frag_A,
loaded_frag_B);
mma(accum, transformed_frag_A, transformed_frag_B, accum);
}
}
typename Mma::IteratorC iter_C({output_C, layout_C}, cutlass::arch::LaneId());
iter_C.store(accum);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Warp-level matrix multiply-accumulate
typename Mma_,
/// Size of threadblock-scoped shape used to store SMEM
typename ThreadblockShape_,
  /// The inner product operation performed by GEMM
typename Operator_ = cutlass::arch::OpMultiplyAdd
>
struct TransformTestbed {
/// Thread-level matrix multiply-accumulate operator
using Mma = Mma_;
using ThreadblockShape = ThreadblockShape_;
using Operator = Operator_;
using Shape = typename Mma::Shape;
using ElementA = typename Mma::ElementA;
using LayoutA = typename Mma::LayoutA;
using ElementB = typename Mma::ElementB;
using LayoutB = typename Mma::LayoutB;
using ElementC = typename Mma::ElementC;
using LayoutC = typename Mma::LayoutC;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
//
// Methods
//
/// Allocates workspace in device memory
TransformTestbed() {
tensor_A.reset(cutlass::make_Coord(ThreadblockShape::kM, ThreadblockShape::kK));
tensor_B.reset(cutlass::make_Coord(ThreadblockShape::kK, ThreadblockShape::kN));
tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.major == 9) {
// NVIDIA Hopper drops support for several data types
if (
cutlass::sizeof_bits<ElementA>::value < 8 ||
cutlass::sizeof_bits<ElementB>::value < 8 ||
cutlass::sizeof_bits<ElementC>::value < 8) {
return false;
}
}
return true;
}
/// Runs the test
bool run(
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) {
if (!sufficient()) {
return true;
}
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementA>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementA>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
tensor_A.host_view(), seed, scope_max, scope_min, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_A.host_data(),
tensor_A.capacity());
} else if (init_A == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementB>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementB>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
tensor_B.host_view(), seed + 16, scope_max, scope_min, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_B.host_data(),
tensor_B.capacity());
} else if (init_B == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_B.host_view());
} else {
return false;
}
cutlass::reference::host::TensorFill(
tensor_C.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_computed.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_reference.host_view(),
ElementC(0)
);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D_computed.sync_device();
// launch kernel
kernel_transform<Mma, ThreadblockShape><<<dim3(1, 1), dim3(32, 1, 1)>>>(
tensor_D_computed.device_data(), tensor_A.device_data(),
tensor_B.device_data(), tensor_C.device_data());
// verify no errors
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result);
if (result != cudaSuccess) {
return false;
}
tensor_D_computed.sync_host();
//
// Reference implementation
//
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, ElementC, ElementC,
Operator>
reference_gemm;
reference_gemm(
{Shape::kM, Shape::kN, ThreadblockShape::kK},
ElementC(1),
tensor_A.host_ref(),
tensor_B.host_ref(),
ElementC(0),
tensor_D_reference.host_ref()
);
//
// Verify equivalence
//
// compare
bool passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view()
);
EXPECT_TRUE(passed);
if (!passed) {
cutlass::TensorView<ElementA, cutlass::layout::ColumnMajor> tensor_A_physical(
tensor_A.host_data(),
tensor_A.stride()[0],
tensor_A.extent());
cutlass::TensorView<ElementB, cutlass::layout::RowMajor> tensor_B_physical(
tensor_B.host_data(),
tensor_B.stride()[0],
tensor_B.extent());
std::cout <<"cutlass::sizeof_bits<ElementA>::value = "<<cutlass::sizeof_bits<ElementA>::value<<"\n";
std::cout
<< "A:\n" << tensor_A.host_view() << "\n\n"
<< "A(physical - stride: " << tensor_A.stride()[0] << ", extent: " << tensor_A.extent() << "):\n" << tensor_A_physical << "\n\n";
std::cout <<"cutlass::sizeof_bits<ElementB>::value = "<<cutlass::sizeof_bits<ElementB>::value<<"\n";
std::cout
<< "B:\n" << tensor_B.host_view() << "\n\n"
<< "B(physical - stride: " << tensor_B.stride()[0] << ", extent: " << tensor_B.extent() << "):\n" << tensor_B_physical << "\n\n";
std::cout
<< "C:\n" << tensor_C.host_view() << "\n\n"
<< "Reference:\n" << tensor_D_reference.host_view() << "\n\n"
<< "Computed:\n" << tensor_D_computed.host_view() << std::endl;
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Warp-level matrix multiply-accumulate
typename Mma_,
/// Size of threadblock-scoped shape used to store SMEM
typename ThreadblockShape_
>
struct TransformedTestbedComplex {
/// Thread-level matrix multiply-accumulate operator
using Mma = Mma_;
using ThreadblockShape = ThreadblockShape_;
using Shape = typename Mma::Shape;
using ElementA = typename Mma::ElementA;
using LayoutA = typename Mma::LayoutA;
using ElementB = typename Mma::ElementB;
using LayoutB = typename Mma::LayoutB;
using ElementC = typename Mma::ElementC;
using LayoutC = typename Mma::LayoutC;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
//
// Methods
//
/// Allocates workspace in device memory
TransformedTestbedComplex() {
tensor_A.reset(cutlass::make_Coord(ThreadblockShape::kM, ThreadblockShape::kK));
tensor_B.reset(cutlass::make_Coord(ThreadblockShape::kK, ThreadblockShape::kN));
tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.major == 9) {
// NVIDIA Hopper drops support for several data types
if (
cutlass::sizeof_bits<ElementA>::value < 8 ||
cutlass::sizeof_bits<ElementB>::value < 8 ||
cutlass::sizeof_bits<ElementC>::value < 8) {
return false;
}
}
return true;
}
/// Runs the test
bool run(
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) {
if (!sufficient()) {
return true;
}
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(tensor_A.host_view(),
seed, 8, -8, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_A.host_data(),
tensor_A.capacity());
} else if (init_A == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(tensor_B.host_view(),
seed + 16, 8, -8, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_B.host_data(),
tensor_B.capacity());
} else if (init_B == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_B.host_view());
} else {
return false;
}
cutlass::reference::host::TensorFill(
tensor_C.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_computed.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_reference.host_view(),
ElementC(0)
);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D_computed.sync_device();
// launch kernel
kernel_transform<Mma, ThreadblockShape><<< dim3(1, 1), dim3(32, 1, 1) >>>(
tensor_D_computed.device_data(),
tensor_A.device_data(),
tensor_B.device_data(),
tensor_C.device_data());
// verify no errors
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result);
if (result != cudaSuccess) {
return false;
}
tensor_D_computed.sync_host();
//
// Reference implementation
//
cutlass::reference::host::GemmComplex(
{Shape::kM, Shape::kN, ThreadblockShape::kK},
ElementC(1),
tensor_A.host_ref(),
Mma::kTransformA,
tensor_B.host_ref(),
Mma::kTransformB,
ElementC(0),
tensor_C.host_ref(),
tensor_D_reference.host_ref()
);
//
// Verify equivalence
//
// compare
bool passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view()
);
EXPECT_TRUE(passed);
if (!passed) {
cutlass::TensorView<ElementA, cutlass::layout::ColumnMajor> tensor_A_physical(
tensor_A.host_data(),
tensor_A.stride()[0],
tensor_A.extent());
cutlass::TensorView<ElementB, cutlass::layout::RowMajor> tensor_B_physical(
tensor_B.host_data(),
tensor_B.stride()[0],
tensor_B.extent());
std::cout <<"cutlass::sizeof_bits<ElementA>::value = "<<cutlass::sizeof_bits<ElementA>::value<<"\n";
std::cout
<< "A:\n" << tensor_A.host_view() << "\n\n"
<< "A(physical - stride: " << tensor_A.stride()[0] << ", extent: " << tensor_A.extent() << "):\n" << tensor_A_physical << "\n\n";
std::cout <<"cutlass::sizeof_bits<ElementB>::value = "<<cutlass::sizeof_bits<ElementB>::value<<"\n";
std::cout
<< "B:\n" << tensor_B.host_view() << "\n\n"
<< "B(physical - stride: " << tensor_B.stride()[0] << ", extent: " << tensor_B.extent() <<"):\n" << tensor_B_physical << "\n\n";
std::cout
<< "C:\n" << tensor_C.host_view() << "\n\n"
<< "Reference:\n" << tensor_D_reference.host_view() << "\n\n"
<< "Computed:\n" << tensor_D_computed.host_view() << std::endl;
}
return passed;
}
};
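// Usage sketch (illustrative only; "MyMma" and "MyThreadblockShape" are
// hypothetical stand-ins for the concrete warp-level MMA type and threadblock
// tile that the unit tests including this header supply):
//
//   TransformedTestbedComplex<MyMma, MyThreadblockShape> testbed;
//   EXPECT_TRUE(testbed.run(cutlass::Distribution::Uniform,
//                           cutlass::Distribution::Uniform));
//
// run() reports success either when the device result matches the host
// GemmComplex reference or when sufficient() determines the device cannot
// execute the kernel, in which case the test is skipped as passing.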
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test kernel
template <typename Mma, typename ThreadblockShape>
__global__ void sparse_kernel(
typename Mma::ElementC *output_C,
typename Mma::ElementA const *input_A,
typename Mma::ElementB const *input_B,
typename Mma::ElementC const *input_C,
typename Mma::ElementE const *input_E,
int iterations = 1) {
// Use AlignedBuffer to store trivially copyable objects in unions and __shared__ buffers.
__shared__ cutlass::AlignedBuffer<typename Mma::ElementA,
ThreadblockShape::kM *
ThreadblockShape::kK / Mma::kSparse>
smem_buffer_A;
__shared__ cutlass::AlignedBuffer<
typename Mma::ElementB, ThreadblockShape::kN * ThreadblockShape::kK> smem_buffer_B;
__shared__ cutlass::AlignedBuffer<
typename Mma::ElementE, Mma::Shape::kM * Mma::Shape::kK /
Mma::kSparse / Mma::kElementsPerElementE>
smem_buffer_E;
__syncthreads();
if (threadIdx.x == 0) {
typename Mma::ElementA *smem_ptr_A = smem_buffer_A.data();
#pragma unroll 1
for (size_t i = 0; i < smem_buffer_A.size(); ++i) {
cutlass::ReferenceFactory<typename Mma::ElementA>::get(smem_ptr_A, i) =
cutlass::ReferenceFactory<typename cutlass::platform::remove_const<
typename Mma::ElementA>::type>::get(input_A, i);
}
typename Mma::ElementB *smem_ptr_B = smem_buffer_B.data();
#pragma unroll 1
for (size_t i = 0; i < smem_buffer_B.size(); ++i) {
cutlass::ReferenceFactory<typename Mma::ElementB>::get(smem_ptr_B, i) =
cutlass::ReferenceFactory<typename cutlass::platform::remove_const<
typename Mma::ElementB>::type>::get(input_B, i);
}
typename Mma::ElementE *smem_ptr_E = smem_buffer_E.data();
#pragma unroll 1
for (size_t i = 0; i < smem_buffer_E.size(); ++i) {
cutlass::ReferenceFactory<typename Mma::ElementE>::get(smem_ptr_E, i) =
cutlass::ReferenceFactory<typename cutlass::platform::remove_const<
typename Mma::ElementE>::type>::get(input_E, i);
}
}
__syncthreads();
//
// Construct warp-level matrix product
//
using FragmentA = typename Mma::FragmentA;
using FragmentB = typename Mma::FragmentB;
using FragmentC = typename Mma::FragmentC;
using FragmentE = typename Mma::FragmentE;
typename Mma::LayoutA layout_A = Mma::LayoutA::packed(
{ThreadblockShape::kM, ThreadblockShape::kK / Mma::kSparse});
typename Mma::LayoutB layout_B =
Mma::LayoutB::packed({ThreadblockShape::kK, ThreadblockShape::kN});
typename Mma::LayoutC layout_C = Mma::LayoutC::packed({Mma::Shape::kM, Mma::Shape::kN});
typename Mma::LayoutE layout_E =
Mma::LayoutE::packed({Mma::Shape::kM * Mma::kInterleaved,
Mma::Shape::kK / Mma::kSparse /
Mma::kElementsPerElementE / Mma::kInterleaved});
typename Mma::IteratorA iter_A({smem_buffer_A.data(), layout_A}, cutlass::arch::LaneId());
typename Mma::IteratorB iter_B({smem_buffer_B.data(), layout_B}, cutlass::arch::LaneId());
typename Mma::IteratorE iter_E({smem_buffer_E.data(), layout_E}, cutlass::arch::LaneId());
FragmentA frag_A;
FragmentB frag_B;
FragmentC accum;
FragmentE frag_E;
Mma mma;
accum.clear();
CUTLASS_PRAGMA_NO_UNROLL
for (int iter = 0; iter < iterations; ++iter) { // place in loop that is not unrolled
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < ThreadblockShape::kK;
k += Mma::Policy::MmaShape::kK) {
iter_A.load(frag_A);
iter_B.load(frag_B);
iter_E.load(frag_E);
++iter_A;
++iter_B;
++iter_E;
mma(accum, frag_A, frag_B, accum, frag_E);
}
}
typename Mma::IteratorC iter_C({output_C, layout_C}, cutlass::arch::LaneId());
iter_C.store(accum);
}
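// Launch sketch: SparseTestbed below invokes this kernel with one threadblock
// holding exactly one warp, since the operand iterators are constructed from
// cutlass::arch::LaneId():
//
//   sparse_kernel<Mma, ThreadblockShape><<< dim3(1, 1), dim3(32, 1, 1) >>>(
//       ptr_D, ptr_A, ptr_B, ptr_C, ptr_E);
//
// where ptr_A points to the compressed (kK / kSparse) operand and ptr_E to the
// reordered sparsity metadata, both in device memory.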
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Warp-level matrix multiply-accumulate
typename Mma_,
/// Size of threadblock-scoped shape used to store SMEM
typename ThreadblockShape_,
/// The inner product operation performed by GEMM
typename Operator_ = cutlass::arch::OpMultiplyAdd
>
struct SparseTestbed {
/// Thread-level matrix multiply-accumulate operator
using Mma = Mma_;
using ThreadblockShape = ThreadblockShape_;
using Operator = Operator_;
using Shape = typename Mma::Shape;
using ElementA = typename Mma::ElementA;
using LayoutA = typename Mma::LayoutA;
using ElementB = typename Mma::ElementB;
using LayoutB = typename Mma::LayoutB;
using ElementC = typename Mma::ElementC;
using LayoutC = typename Mma::LayoutC;
static int const Sparse = Mma::kSparse;
static int const MetaSizeInBits = Mma::kMetaSizeInBits;
static int const MaxID2 = Mma::kMaxID2;
static int const Interleaved = Mma::kInterleaved;
using ElementE = typename Mma::ElementE;
static int const ElementsPerElementE = Mma::kElementsPerElementE;
using LayoutE = cutlass::layout::RowMajor;
using ReorderedLayoutE =
cutlass::layout::ColumnMajorInterleaved<Interleaved>;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementA, LayoutA> tensor_A_uncompressed;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
cutlass::HostTensor<ElementE, LayoutE> tensor_E;
cutlass::HostTensor<ElementE, ReorderedLayoutE> tensor_E_reordered;
//
// Methods
//
/// Allocates workspace in device memory
SparseTestbed() {
tensor_A.reset(cutlass::make_Coord(ThreadblockShape::kM,
ThreadblockShape::kK / Sparse));
tensor_A_uncompressed.reset(
cutlass::make_Coord(ThreadblockShape::kM, ThreadblockShape::kK));
tensor_B.reset(cutlass::make_Coord(ThreadblockShape::kK, ThreadblockShape::kN));
tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
tensor_E.reset(cutlass::make_Coord(
Shape::kM, Shape::kK / Sparse / ElementsPerElementE));
tensor_E_reordered.reset(cutlass::make_Coord(
Shape::kM, Shape::kK / Sparse / ElementsPerElementE));
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.major == 9) {
// NVIDIA Hopper drops support for several data types
if (
cutlass::sizeof_bits<ElementA>::value < 8 ||
cutlass::sizeof_bits<ElementB>::value < 8 ||
cutlass::sizeof_bits<ElementC>::value < 8) {
return false;
}
}
return true;
}
/// Runs the test
bool run(
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_E = cutlass::Distribution::Uniform) {
if (!sufficient()) {
return true;
}
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementA>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementA>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
tensor_A.host_view(), seed, scope_max, scope_min, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_A.host_data(),
tensor_A.capacity());
} else if (init_A == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementB>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementB>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
tensor_B.host_view(), seed + 16, scope_max, scope_min, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(tensor_B.host_data(),
tensor_B.capacity());
} else if (init_B == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(tensor_B.host_view());
} else {
return false;
}
cutlass::reference::host::TensorFill(
tensor_C.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_computed.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_reference.host_view(),
ElementC(0)
);
if (init_E == cutlass::Distribution::Uniform) {
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomSparseMeta(
tensor_E.host_view(), seed, MetaSizeInBits);
} else if (init_E == cutlass::Distribution::Identity) {
uint32_t content = (MaxID2 == 1) ? 0x44444444 : 0x4444;
cutlass::reference::host::TensorFill(tensor_E.host_view(),
(ElementE)(content));
} else {
return false;
}
cutlass::reorder_meta(
tensor_E_reordered.host_ref(), tensor_E.host_ref(),
{Shape::kM, Shape::kN, Shape::kK / Sparse / ElementsPerElementE});
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D_computed.sync_device();
tensor_E_reordered.sync_device();
// launch kernel
sparse_kernel<Mma, ThreadblockShape><<< dim3(1, 1), dim3(32, 1, 1) >>>(
tensor_D_computed.device_data(),
tensor_A.device_data(),
tensor_B.device_data(),
tensor_C.device_data(),
tensor_E_reordered.device_data());
// verify no errors
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result);
if (result != cudaSuccess) {
return false;
}
tensor_D_computed.sync_host();
//
// Reference implementation
//
cutlass::uncompress(tensor_A_uncompressed.host_ref(), tensor_A.host_ref(),
tensor_E.host_ref(), Shape::kM, Shape::kK);
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, ElementC, ElementC,
Operator>
reference_gemm;
reference_gemm(
{Shape::kM, Shape::kN, ThreadblockShape::kK},
ElementC(1),
tensor_A_uncompressed.host_ref(),
tensor_B.host_ref(),
ElementC(0),
tensor_D_reference.host_ref()
);
//
// Verify equivalence
//
// compare
bool passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view()
);
EXPECT_TRUE(passed);
if (!passed) {
std::cout <<"cutlass::sizeof_bits<ElementA>::value = "<<cutlass::sizeof_bits<ElementA>::value<<"\n";
std::cout << "A:\n" << tensor_A.host_view() << "\n\n";
std::cout <<"cutlass::sizeof_bits<ElementB>::value = "<<cutlass::sizeof_bits<ElementB>::value<<"\n";
std::cout << "B:\n" << tensor_B.host_view() << "\n\n";
std::cout <<"cutlass::sizeof_bits<ElementB>::value = "<<cutlass::sizeof_bits<ElementE>::value<<"\n";
std::cout << "E:\n" << tensor_E.host_view() << "\n\n";
std::cout
<< "C:\n" << tensor_C.host_view() << "\n\n"
<< "Reference:\n" << tensor_D_reference.host_view() << "\n\n"
<< "Computed:\n" << tensor_D_computed.host_view() << "\n";
}
return passed;
}
};
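// Note on the verification path above: tensor_A holds the structured-sparse
// operand in compressed form (ThreadblockShape::kK / Sparse columns) and
// tensor_E holds its metadata. The reference result is produced by expanding A
// with cutlass::uncompress() and running a dense host GEMM, so the comparison
// exercises both the sparse MMA math and the metadata reordering.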
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace test
| test/unit/gemm/warp/testbed.h/0 | {
"file_path": "test/unit/gemm/warp/testbed.h",
"repo_id": "test",
"token_count": 20479
} | 52 |
#pragma once
#define CUDA_INCLUDE_DIR "@CUDA_TOOLKIT_ROOT_DIR@/include"
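// Note: this is a CMake configure_file() template; @CUDA_TOOLKIT_ROOT_DIR@ is
// substituted with the detected CUDA toolkit path when the build generates the
// corresponding header consumed by the NVRTC unit tests.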
| test/unit/nvrtc/thread/nvrtc_config.in/0 | {
"file_path": "test/unit/nvrtc/thread/nvrtc_config.in",
"repo_id": "test",
"token_count": 36
} | 53 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for the device-wide split-K reduction kernel
*/
#include <iostream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/reduction/kernel/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/tensor_view_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace reduction {
template <typename ReductionKernel>
__global__ void kernel_reduce_splitk(typename ReductionKernel::Params params) {
__shared__ typename ReductionKernel::SharedStorage shared_storage;
ReductionKernel reduction_op;
reduction_op(params, shared_storage);
}
template <typename ReductionKernel>
class ReduceSplitKTestbed {
public:
using ElementAccumulator = typename ReductionKernel::ElementAccumulator;
using ElementWorkspace = typename ReductionKernel::ElementWorkspace;
using ElementOutput = typename ReductionKernel::ElementOutput;
using Layout = cutlass::layout::RowMajor;
public:
cutlass::Distribution::Kind distribution_workspace;
cutlass::Distribution::Kind distribution_source;
uint64_t seed;
public:
/// Ctor
ReduceSplitKTestbed(
cutlass::Distribution::Kind distribution_workspace = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind distribution_source = cutlass::Distribution::Uniform,
uint64_t seed = 2019
):
distribution_workspace(distribution_workspace),
distribution_source(distribution_source),
seed(seed) {
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
cutlass::reference::host::TensorFillRandomUniform(view, seed, 8, -8, 0);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, -1);
} else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
} else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(view.data(),
view.capacity());
} else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Runs a single problem size
bool run(
cutlass::MatrixCoord problem_size,
int partitions,
ElementAccumulator alpha = 1,
ElementAccumulator beta = 0) {
cutlass::HostTensor<ElementWorkspace, Layout> workspace({
problem_size.row() * partitions,
problem_size.column()
});
cutlass::HostTensor<ElementOutput, Layout> source(problem_size);
cutlass::HostTensor<ElementOutput, Layout> destination(problem_size);
cutlass::HostTensor<ElementOutput, Layout> destination_reference(problem_size, false);
//
// Initialize
//
initialize_tensor(workspace.host_view(), distribution_workspace, seed);
initialize_tensor(source.host_view(), distribution_source, seed + 23);
cutlass::reference::host::TensorFill(destination.host_view());
workspace.sync_device();
source.sync_device();
destination.sync_device();
//
// Launch reduction kernel
//
dim3 block = ReductionKernel::block_shape();
dim3 grid = ReductionKernel::grid_shape(problem_size);
typename ReductionKernel::Params params(
problem_size,
partitions,
problem_size.row() * problem_size.column(),
workspace.device_ref(),
destination.device_ref(),
source.device_ref(),
{alpha, beta}
);
test::reduction::kernel_reduce_splitk<ReductionKernel><<< grid, block >>>(params);
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess)
<< "CUDA error: " << cudaGetErrorString(result);
destination.sync_host();
//
// Compute reference
//
for (int m = 0; m < problem_size.row(); ++m) {
for (int n = 0; n < problem_size.column(); ++n) {
ElementAccumulator accum = 0;
for (int k = 0; k < partitions; ++k) {
accum += ElementAccumulator(workspace.at({m + k * problem_size.row(), n}));
}
ElementAccumulator c = ElementAccumulator(source.at({m, n}));
destination_reference.at({m, n}) = ElementOutput(accum * alpha + beta * c);
}
}
//
// Compare
//
EXPECT_GT(cutlass::reference::host::TensorNorm(destination.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(destination_reference.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(
destination.host_view(), destination_reference.host_view());
EXPECT_TRUE(passed)
<< "Workspace =\n" << workspace.host_view() << "\n\n"
<< "\n"
<< "Reference =\n" << destination_reference.host_view() << "\n\n"
<< "Computed =\n" << destination.host_view() << "\n";
return passed;
}
/// Runs through a variety of test cases
bool run_all() {
cutlass::MatrixCoord problem_sizes[] = {
{8, 8},
{136, 72},
{248, 232},
};
int partition_counts[] = {
1,3,4,5,11
};
bool passed = false;
for (cutlass::MatrixCoord problem : problem_sizes) {
for (int partitions : partition_counts) {
passed = run(problem, partitions);
if (!passed) {
return false;
}
}
}
return passed;
}
};
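// The host reference computed in run() is, per output element,
//
//   D(m, n) = alpha * sum_{k=0}^{partitions-1} workspace(m + k * M, n) + beta * C(m, n)
//
// i.e. the split-K partial sums are stacked row-wise in the workspace tensor
// and reduced along the partition dimension before the linear combination.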
} // namespace reduction
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Strictly F32 data
//
TEST(Reduction_ReduceSplitK, f32_f32_f32_1_1x32) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = float;
int const kN = 1;
using Shape = cutlass::MatrixShape<1, 32>;
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kN,
ElementAccumulator,
ElementAccumulator
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
ElementWorkspace,
kN
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
Shape,
OutputOp,
ReductionOp
>;
test::reduction::ReduceSplitKTestbed<ReductionKernel> testbed;
EXPECT_TRUE(testbed.run_all());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Vectorized access
//
TEST(Reduction_ReduceSplitK, f32_f32_f32_2_4x64) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = float;
int const kN = 2;
using Shape = cutlass::MatrixShape<4, 64>;
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kN,
ElementAccumulator,
ElementAccumulator
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
ElementWorkspace,
kN
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
Shape,
OutputOp,
ReductionOp
>;
test::reduction::ReduceSplitKTestbed<ReductionKernel> testbed;
EXPECT_TRUE(testbed.run_all());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Vectorized access
//
TEST(Reduction_ReduceSplitK, f32_f32_f16_2_4x64) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = cutlass::half_t;
int const kN = 2;
using Shape = cutlass::MatrixShape<4, 64>;
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kN,
ElementAccumulator,
ElementAccumulator
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
ElementWorkspace,
kN
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
Shape,
OutputOp,
ReductionOp
>;
test::reduction::ReduceSplitKTestbed<ReductionKernel> testbed;
EXPECT_TRUE(testbed.run_all());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Vectorized access
//
TEST(Reduction_ReduceSplitK, f32_f32_f16_8_4x64) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = cutlass::half_t;
int const kN = 8;
using Shape = cutlass::MatrixShape<4, 64>;
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kN,
ElementAccumulator,
ElementAccumulator
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
ElementWorkspace,
kN
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
Shape,
OutputOp,
ReductionOp
>;
test::reduction::ReduceSplitKTestbed<ReductionKernel> testbed;
EXPECT_TRUE(testbed.run_all());
}
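// In the tests above, kN is the vector width (elements per memory access)
// shared by the LinearCombination output operator and the ReduceAdd reduction
// operator; the f16-output cases keep a float workspace and accumulator while
// narrowing only the output element.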
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/reduction/kernel/reduce_splitk.cu/0 | {
"file_path": "test/unit/reduction/kernel/reduce_splitk.cu",
"repo_id": "test",
"token_count": 3842
} | 54 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "../common/cutlass_unit_test.h"
#include "cutlass/util/device_rmsnorm.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/constants.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
using ElementType = cutlass::half_t;
using Layout = cutlass::layout::RowMajor;
void rmsnorm_host(cutlass::MatrixCoord tensor_size,
cutlass::TensorRef<ElementType, Layout> output,
cutlass::TensorRef<ElementType, Layout> input,
cutlass::TensorRef<ElementType, Layout> weight,
float epsilon) {
const int M = tensor_size.row();
const int N = tensor_size.column();
for (int m = 0; m < M; ++m) {
float square_sum{0};
for (int n = 0; n < N; ++n) {
float inp = static_cast<float>(input.at({m, n}));
square_sum += inp * inp;
}
float sq_mean = square_sum / (float)N;
float sqrt_var = cutlass::fast_sqrt(sq_mean + epsilon);
for (int n = 0; n < N; ++n) {
float inp = static_cast<float>(input.at({m, n}));
float g = static_cast<float>(weight.at({0, n}));
float res_fp32 = inp / sqrt_var * g;
output.at({m, n}) = ElementType(res_fp32);
}
}
}
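// rmsnorm_host computes, for each row m of the input,
//
//   rms(m) = sqrt( (1/N) * sum_n input(m, n)^2 + epsilon )
//   output(m, n) = input(m, n) / rms(m) * weight(n)
//
// and serves as the reference against which the device-side cutlass::rmsnorm()
// result is compared in run_test() below.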
void run_test(int M, int N) {
cutlass::HostTensor<ElementType, Layout> input, output_ref, output, weight;
input.reset({M, N});
output.reset({M, N});
output_ref.reset({M, N});
weight.reset({1, N});
const unsigned seed = 2022;
cutlass::reference::host::TensorFillRandomUniform(input.host_view(),
seed,
ElementType(5),
ElementType(-5),
0);
cutlass::reference::host::TensorFillRandomUniform(weight.host_view(),
seed,
ElementType(5),
ElementType(-5),
0);
input.sync_device();
weight.sync_device();
rmsnorm_host({M, N}, output_ref.host_ref(), input.host_ref(), weight.host_ref(), (float)1e-5);
cutlass::rmsnorm({M, N}, output.device_ref(),
input.device_ref(), weight.device_ref(), NULL, (float)1e-5L);
output.sync_host();
float max_abs_diff = -1;
float mean_abs_diff = 0;
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
auto diff = std::abs(static_cast<float>(output_ref.at({m, n}) - output.at({m, n})));
mean_abs_diff += diff;
max_abs_diff = std::max(max_abs_diff, diff);
}
}
mean_abs_diff /= float(M * N);
EXPECT_TRUE(max_abs_diff < 0.001f && mean_abs_diff < 0.001f)
<< "Max absolute difference : " << max_abs_diff << "\n"
<< "Mean absolute difference: " << mean_abs_diff;
}
TEST(RMSNorm, 16x1024) {
run_test(16, 1024);
}
TEST(RMSNorm, 1x127) {
run_test(1, 127);
}
| test/unit/util/rms_norm.cu/0 | {
"file_path": "test/unit/util/rms_norm.cu",
"repo_id": "test",
"token_count": 1619
} | 55 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines operations for all GEMM operation kinds in CUTLASS Library.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/gemm/device/gemm_sparse.h"
#include "cutlass/gemm/device/gemm_complex.h"
#include "cutlass/gemm/device/gemm_batched.h"
#include "cutlass/gemm/device/gemm_array.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/default_gemm_universal.h"
#include "cutlass/gemm/kernel/default_gemm_planar_complex_universal.h"
#include "cutlass/library/library.h"
#include "library_internal.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class GemmOperationBase : public Operation {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementD = ElementC;
using LayoutD = LayoutC;
// assuming all tensors use the same type for StrideIndex
using StrideIndex = typename Operator::LayoutA::Index;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
protected:
///
GemmDescription description_;
public:
/// Constructor
GemmOperationBase(char const *name = "unknown_gemm") {
description_.name = name;
description_.provider = Provider::kCUTLASS;
description_.kind = OperationKind::kGemm;
description_.gemm_kind = GemmKind::kGemm;
description_.tile_description.threadblock_shape = make_Coord(
Operator::ThreadblockShape::kM,
Operator::ThreadblockShape::kN,
Operator::ThreadblockShape::kK);
description_.tile_description.threadblock_stages = Operator::kStages;
description_.tile_description.warp_count = make_Coord(
Operator::GemmKernel::WarpCount::kM,
Operator::GemmKernel::WarpCount::kN,
Operator::GemmKernel::WarpCount::kK);
description_.tile_description.math_instruction.instruction_shape = make_Coord(
Operator::InstructionShape::kM,
Operator::InstructionShape::kN,
Operator::InstructionShape::kK);
description_.tile_description.math_instruction.element_accumulator =
NumericTypeMap<ElementAccumulator>::kId;
description_.tile_description.math_instruction.opcode_class =
OpcodeClassMap<typename Operator::OperatorClass>::kId;
description_.tile_description.math_instruction.math_operation =
MathOperationMap<typename Operator::MathOperator>::kId;
description_.tile_description.minimum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMin;
description_.tile_description.maximum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMax;
description_.A = make_TensorDescription<ElementA, LayoutA>(Operator::kAlignmentA);
description_.B = make_TensorDescription<ElementB, LayoutB>(Operator::kAlignmentB);
description_.C = make_TensorDescription<ElementC, LayoutC>(Operator::kAlignmentC);
description_.D = make_TensorDescription<ElementD, LayoutD>(Operator::kAlignmentC);
description_.element_epilogue = NumericTypeMap<ElementCompute>::kId;
description_.split_k_mode = SplitKMode::kNone;
description_.transform_A = ComplexTransformMap<Operator::kTransformA>::kId;
description_.transform_B = ComplexTransformMap<Operator::kTransformB>::kId;
}
/// Returns the description of the GEMM operation
virtual OperationDescription const & description() const {
return description_;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class GemmOperation : public GemmOperationBase<Operator_> {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementD = ElementC;
using LayoutD = LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
GemmOperation(char const *name = "unknown_gemm"): GemmOperationBase<Operator_>(name) {
this->description_.gemm_kind = GemmKind::kGemm;
}
protected:
/// Constructs the arguments structure given the configuration
static Status construct_arguments_(
OperatorArguments &operator_args,
GemmConfiguration const *configuration) {
operator_args.problem_size = configuration->problem_size;
operator_args.ref_A = {nullptr, configuration->lda};
operator_args.ref_B = {nullptr, configuration->ldb};
operator_args.ref_C = {nullptr, configuration->ldc};
operator_args.ref_D = {nullptr, configuration->ldd};
operator_args.split_k_slices = configuration->split_k_slices;
return Status::kSuccess;
}
/// Updates the arguments structure given the arguments
static Status update_arguments_(
OperatorArguments &operator_args,
GemmArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<ElementCompute const *>(arguments->alpha),
*static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::EpilogueOutputOp::Params params(
static_cast<ElementCompute const *>(arguments->alpha),
static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else {
return Status::kErrorInvalidProblem;
}
operator_args.ref_A.reset(static_cast<ElementA const *>(arguments->A));
operator_args.ref_B.reset(static_cast<ElementB const *>(arguments->B));
operator_args.ref_C.reset(static_cast<ElementC const *>(arguments->C));
operator_args.ref_D.reset(static_cast<ElementD *>(arguments->D));
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
GemmConfiguration const *configuration =
static_cast<GemmConfiguration const *>(configuration_ptr);
GemmArguments const *arguments =
static_cast<GemmArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
return Operator::get_workspace_size(args);
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
return op->initialize(args, device_workspace, stream);
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<GemmArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args);
if (status != Status::kSuccess) {
return status;
}
return op->run(stream);
}
void print_operator_args(OperatorArguments &operator_args) const {
#if 0
std::cout << "GemmOperation::OperatorArguments" << std::endl;
std::cout << " problem_size: " << operator_args.problem_size.m() << ", "<< operator_args.problem_size.n() << "," << operator_args.problem_size.k() << std::endl;
std::cout << " alpha: " << operator_args.epilogue.alpha << std::endl;
std::cout << " alpha_ptr: " << operator_args.epilogue.alpha_ptr << std::endl;
std::cout << " beta: " << operator_args.epilogue.beta << std::endl;
std::cout << " beta_ptr: " << operator_args.epilogue.beta_ptr << std::endl;
std::cout << " ref_A.data(): " << operator_args.ref_A.data() << std::endl;
std::cout << " ref_A.stride: " << operator_args.ref_A.stride(0) << std::endl;
std::cout << " ref_B.data(): " << operator_args.ref_B.data() << std::endl;
std::cout << " ref_B.stride: " << operator_args.ref_B.stride(0) << std::endl;
std::cout << " ref_C.data(): " << operator_args.ref_C.data() << std::endl;
std::cout << " ref_C.stride: " << operator_args.ref_C.stride(0) << std::endl;
#endif
}
};
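// Illustrative instantiation (a sketch, not the generated manifest code that
// normally registers these operations; the device::Gemm parameters shown are
// simply one example configuration):
//
//   using DeviceGemm = cutlass::gemm::device::Gemm<
//       float, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::ColumnMajor>;
//
//   GemmOperation<DeviceGemm> operation("example_sgemm_nn");
//
// Wrapping a device-level kernel this way exposes it through the type-erased
// Operation interface (can_implement / initialize / run) used by the library
// handle and profiler.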
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class GemmSparseOperation : public GemmOperationBase<Operator_> {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementD = ElementC;
using LayoutD = LayoutC;
using ElementE = typename Operator::ElementE;
using LayoutE = typename Operator::LayoutE;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
GemmSparseOperation(char const *name = "unknown_gemm"): GemmOperationBase<Operator_>(name) {
this->description_.kind = OperationKind::kSparseGemm;
this->description_.gemm_kind = GemmKind::kSparse;
this->description_.E = make_TensorDescription<ElementE, LayoutE>(Operator::kAlignmentE);
}
protected:
/// Constructs the arguments structure given the configuration
static Status construct_arguments_(
OperatorArguments &operator_args,
SparseGemmConfiguration const *configuration) {
operator_args.problem_size = configuration->problem_size;
operator_args.ref_A = {nullptr, configuration->lda};
operator_args.ref_B = {nullptr, configuration->ldb};
operator_args.ref_C = {nullptr, configuration->ldc};
operator_args.ref_D = {nullptr, configuration->ldd};
operator_args.ref_E = {nullptr, configuration->lde};
return Status::kSuccess;
}
/// Updates the arguments structure given the arguments
static Status update_arguments_(
OperatorArguments &operator_args,
SparseGemmArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<ElementCompute const *>(arguments->alpha),
*static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::EpilogueOutputOp::Params params(
static_cast<ElementCompute const *>(arguments->alpha),
static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else {
return Status::kErrorInvalidProblem;
}
operator_args.ref_A.reset(static_cast<ElementA const *>(arguments->A));
operator_args.ref_B.reset(static_cast<ElementB const *>(arguments->B));
operator_args.ref_C.reset(static_cast<ElementC const *>(arguments->C));
operator_args.ref_D.reset(static_cast<ElementD *>(arguments->D));
operator_args.ref_E.reset(static_cast<ElementE const *>(arguments->E));
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
SparseGemmConfiguration const *configuration =
static_cast<SparseGemmConfiguration const *>(configuration_ptr);
SparseGemmArguments const *arguments =
static_cast<SparseGemmArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<SparseGemmConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
return Operator::get_workspace_size(args);
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<SparseGemmConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
return op->initialize(args, device_workspace, stream);
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<SparseGemmArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args);
if (status != Status::kSuccess) {
return status;
}
return op->run(stream);
}
void print_operator_args(OperatorArguments &operator_args) const {
#if 0
std::cout << "GemmOperation::OperatorArguments" << std::endl;
std::cout << " problem_size: " << operator_args.problem_size.m() << ", "<< operator_args.problem_size.n() << "," << operator_args.problem_size.k() << std::endl;
std::cout << " alpha: " << operator_args.epilogue.alpha << std::endl;
std::cout << " alpha_ptr: " << operator_args.epilogue.alpha_ptr << std::endl;
std::cout << " beta: " << operator_args.epilogue.beta << std::endl;
std::cout << " beta_ptr: " << operator_args.epilogue.beta_ptr << std::endl;
std::cout << " ref_A.data(): " << operator_args.ref_A.data() << std::endl;
std::cout << " ref_A.stride: " << operator_args.ref_A.stride(0) << std::endl;
std::cout << " ref_B.data(): " << operator_args.ref_B.data() << std::endl;
std::cout << " ref_B.stride: " << operator_args.ref_B.stride(0) << std::endl;
std::cout << " ref_C.data(): " << operator_args.ref_C.data() << std::endl;
std::cout << " ref_C.stride: " << operator_args.ref_C.stride(0) << std::endl;
#endif
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class GemmUniversalOperation : public GemmOperationBase<Operator_> {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementD = ElementC;
using LayoutD = LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
GemmUniversalOperation(char const *name = "unknown_gemm"):
GemmOperationBase<Operator_>(name) {
this->description_.gemm_kind = GemmKind::kUniversal;
}
protected:
/// Constructs the arguments structure given the configuration
static Status construct_arguments_(
OperatorArguments &operator_args,
GemmUniversalConfiguration const *configuration) {
operator_args.mode = configuration->mode;
operator_args.problem_size = configuration->problem_size;
operator_args.batch_count = configuration->batch_count;
operator_args.lda = configuration->lda;
operator_args.ldb = configuration->ldb;
operator_args.ldc = configuration->ldc;
operator_args.ldd = configuration->ldd;
return Status::kSuccess;
}
/// Updates the arguments structure given the arguments
static Status update_arguments_(
OperatorArguments &operator_args,
GemmUniversalArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<ElementCompute const *>(arguments->alpha),
*static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::EpilogueOutputOp::Params params(
static_cast<ElementCompute const *>(arguments->alpha),
static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else {
return Status::kErrorInvalidProblem;
}
// update arguments
operator_args.ptr_A = arguments->A;
operator_args.ptr_B = arguments->B;
operator_args.ptr_C = arguments->C;
operator_args.ptr_D = arguments->D;
operator_args.batch_stride_A = arguments->batch_stride_A;
operator_args.batch_stride_B = arguments->batch_stride_B;
operator_args.batch_stride_C = arguments->batch_stride_C;
operator_args.batch_stride_D = arguments->batch_stride_D;
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
GemmUniversalConfiguration const *configuration =
static_cast<GemmUniversalConfiguration const *>(configuration_ptr);
GemmUniversalArguments const *arguments =
static_cast<GemmUniversalArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmUniversalConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
status = update_arguments_(
args,
static_cast<GemmUniversalArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return 0;
}
uint64_t size = Operator::get_workspace_size(args);
return size;
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmUniversalConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
status = op->initialize(args, device_workspace, stream);
return status;
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<GemmUniversalArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args);
if (status != Status::kSuccess) {
return status;
}
status = op->run(stream);
return status;
}
};
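// Typical call sequence through the type-erased interface (a sketch; "op" is a
// pointer to any Operation defined above and config/args are the matching
// GemmUniversal* configuration and argument structs from the library headers):
//
//   uint64_t host_bytes   = op->get_host_workspace_size(&config);
//   uint64_t device_bytes = op->get_device_workspace_size(&config, &args);
//   // caller allocates host_workspace / device_workspace of those sizes
//   Status status = op->can_implement(&config, &args);
//   if (status == Status::kSuccess) {
//     status = op->initialize(&config, host_workspace, device_workspace, stream);
//   }
//   if (status == Status::kSuccess) {
//     status = op->run(&args, host_workspace, device_workspace, stream);
//   }
//
// initialize() placement-news the underlying device-level operator into the
// host workspace; run() then updates its arguments and launches the kernel.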
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class GemmPlanarComplexOperation : public GemmOperationBase<Operator_> {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementD = ElementC;
using LayoutD = LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
GemmPlanarComplexOperation(char const *name = "unknown_gemm"): GemmOperationBase<Operator_>(name) {
this->description_.gemm_kind = GemmKind::kPlanarComplex;
}
protected:
/// Constructs the arguments structure given the configuration
static Status construct_arguments_(
OperatorArguments &operator_args,
GemmPlanarComplexConfiguration const *configuration) {
operator_args.mode = cutlass::gemm::GemmUniversalMode::kBatched;
operator_args.problem_size = configuration->problem_size;
operator_args.batch_count = configuration->batch_count;
operator_args.lda_real = configuration->lda_real;
operator_args.lda_imag = configuration->lda_imag;
operator_args.ldb_real = configuration->ldb_real;
operator_args.ldb_imag = configuration->ldb_imag;
operator_args.ldc_real = configuration->ldc_real;
operator_args.ldc_imag = configuration->ldc_imag;
operator_args.ldd_real = configuration->ldd_real;
operator_args.ldd_imag = configuration->ldd_imag;
return Status::kSuccess;
}
/// Updates the arguments structure given the arguments
static Status update_arguments_(
OperatorArguments &operator_args,
GemmPlanarComplexArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<cutlass::complex<ElementCompute> const *>(arguments->alpha),
*static_cast<cutlass::complex<ElementCompute> const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::EpilogueOutputOp::Params params(
static_cast<cutlass::complex<ElementCompute> const *>(arguments->alpha),
static_cast<cutlass::complex<ElementCompute> const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else {
return Status::kErrorInvalidProblem;
}
// update arguments
operator_args.ptr_A_real = arguments->A_real;
operator_args.ptr_A_imag = arguments->A_imag;
operator_args.ptr_B_real = arguments->B_real;
operator_args.ptr_B_imag = arguments->B_imag;
operator_args.ptr_C_real = arguments->C_real;
operator_args.ptr_C_imag = arguments->C_imag;
operator_args.ptr_D_real = arguments->D_real;
operator_args.ptr_D_imag = arguments->D_imag;
operator_args.batch_stride_A = arguments->batch_stride_A_real;
operator_args.batch_stride_A_imag = arguments->batch_stride_A_imag;
operator_args.batch_stride_B = arguments->batch_stride_B_real;
operator_args.batch_stride_B_imag = arguments->batch_stride_B_imag;
operator_args.batch_stride_C = arguments->batch_stride_C_real;
operator_args.batch_stride_C_imag = arguments->batch_stride_C_imag;
operator_args.batch_stride_D = arguments->batch_stride_D_real;
operator_args.batch_stride_D_imag = arguments->batch_stride_D_imag;
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
GemmPlanarComplexConfiguration const *configuration =
static_cast<GemmPlanarComplexConfiguration const *>(configuration_ptr);
GemmPlanarComplexArguments const *arguments =
static_cast<GemmPlanarComplexArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmPlanarComplexConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
uint64_t size = Operator::get_workspace_size(args);
return size;
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmPlanarComplexConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
status = op->initialize(args, device_workspace, stream);
return status;
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<GemmPlanarComplexArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args);
if (status != Status::kSuccess) {
return status;
}
status = op->run(stream);
return status;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class GemmPlanarComplexArrayOperation : public GemmOperationBase<Operator_> {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementD = ElementC;
using LayoutD = LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
GemmPlanarComplexArrayOperation(char const *name = "unknown_gemm"): GemmOperationBase<Operator_>(name) {
this->description_.gemm_kind = GemmKind::kPlanarComplexArray;
}
protected:
/// Constructs the arguments structure given the configuration
static Status construct_arguments_(
OperatorArguments &operator_args,
GemmPlanarComplexArrayConfiguration const *configuration) {
operator_args.mode = cutlass::gemm::GemmUniversalMode::kArray;
operator_args.problem_size = configuration->problem_size;
operator_args.batch_count = configuration->batch_count;
operator_args.lda_real = configuration->lda_real;
operator_args.lda_imag = configuration->lda_imag;
operator_args.ldb_real = configuration->ldb_real;
operator_args.ldb_imag = configuration->ldb_imag;
operator_args.ldc_real = configuration->ldc_real;
operator_args.ldc_imag = configuration->ldc_imag;
operator_args.ldd_real = configuration->ldd_real;
operator_args.ldd_imag = configuration->ldd_imag;
return Status::kSuccess;
}
/// Updates the arguments structure with the pointers and scalars supplied at runtime
static Status update_arguments_(
OperatorArguments &operator_args,
GemmPlanarComplexArrayArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<cutlass::complex<ElementCompute> const *>(arguments->alpha),
*static_cast<cutlass::complex<ElementCompute> const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::EpilogueOutputOp::Params params(
static_cast<cutlass::complex<ElementCompute> const *>(arguments->alpha),
static_cast<cutlass::complex<ElementCompute> const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else {
return Status::kErrorInvalidProblem;
}
// update arguments
operator_args.ptr_A_real = arguments->A_real;
operator_args.ptr_A_imag = arguments->A_imag;
operator_args.ptr_B_real = arguments->B_real;
operator_args.ptr_B_imag = arguments->B_imag;
operator_args.ptr_C_real = arguments->C_real;
operator_args.ptr_C_imag = arguments->C_imag;
operator_args.ptr_D_real = arguments->D_real;
operator_args.ptr_D_imag = arguments->D_imag;
operator_args.ptr_M = arguments->M;
operator_args.ptr_N = arguments->N;
operator_args.ptr_K = arguments->K;
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
GemmPlanarComplexArrayConfiguration const *configuration =
static_cast<GemmPlanarComplexArrayConfiguration const *>(configuration_ptr);
GemmPlanarComplexArrayArguments const *arguments =
static_cast<GemmPlanarComplexArrayArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmPlanarComplexArrayConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
uint64_t size = Operator::get_workspace_size(args);
return size;
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmPlanarComplexArrayConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
status = op->initialize(args, device_workspace, stream);
return status;
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<GemmPlanarComplexArrayArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args);
if (status != Status::kSuccess) {
return status;
}
status = op->run(stream);
return status;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class GemmGroupedOperation : public GemmOperationBase<Operator_> {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementD = ElementC;
using LayoutD = LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
GemmGroupedOperation(char const *name = "unknown_gemm"):
GemmOperationBase<Operator_>(name) {
this->description_.gemm_kind = GemmKind::kGrouped;
}
protected:
/// Constructs the arguments structure given the configuration and arguments
static Status construct_arguments_(
OperatorArguments &op_args,
GemmGroupedConfiguration const *config) {
op_args.problem_count = config->problem_count;
op_args.threadblock_count = config->threadblock_count;
return Status::kSuccess;
}
/// Updates the arguments structure with the pointers and scalars supplied at runtime
static Status update_arguments_(
OperatorArguments &op_args,
GemmGroupedArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<ElementCompute const *>(arguments->alpha),
*static_cast<ElementCompute const *>(arguments->beta)
);
op_args.output_op = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice) {
typename Operator::EpilogueOutputOp::Params params(
static_cast<ElementCompute const *>(arguments->alpha),
static_cast<ElementCompute const *>(arguments->beta)
);
op_args.output_op = params;
}
else {
return Status::kErrorInvalidProblem;
}
op_args.problem_sizes = arguments->problem_sizes;
op_args.ptr_A = static_cast<ElementA **>(arguments->ptr_A);
op_args.ptr_B = static_cast<ElementB **>(arguments->ptr_B);
op_args.ptr_C = static_cast<ElementC **>(arguments->ptr_C);
op_args.ptr_D = static_cast<ElementD **>(arguments->ptr_D);
op_args.lda = arguments->lda;
op_args.ldb = arguments->ldb;
op_args.ldc = arguments->ldc;
op_args.ldd = arguments->ldd;
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
GemmGroupedConfiguration const *configuration =
static_cast<GemmGroupedConfiguration const *>(configuration_ptr);
GemmGroupedArguments const *arguments =
static_cast<GemmGroupedArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmGroupedConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
status = update_arguments_(
args,
static_cast<GemmGroupedArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return 0;
}
uint64_t size = Operator::get_workspace_size(args);
return size;
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmGroupedConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
status = op->initialize(args, device_workspace, stream);
return status;
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<GemmGroupedArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args);
if (status != Status::kSuccess) {
return status;
}
status = op->run(stream);
return status;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/src/gemm_operation.h/0 | {
"file_path": "tools/library/src/gemm_operation.h",
"repo_id": "tools",
"token_count": 14489
} | 56 |
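All of these wrapper classes expose the same stateless driver pattern: query `can_implement`, size the host and device workspaces, `initialize` once, then `run` as many times as needed. Below is a minimal, hedged sketch of how a caller might drive any such `Operation` through its virtual interface; the `run_operation` helper is hypothetical, and `operation`, `config`, and `arguments` are assumed to have been obtained elsewhere (for example from the library manifest plus a populated `GemmGroupedConfiguration` / `GemmGroupedArguments` pair).

```cpp
// Hypothetical driver sketch for a cutlass::library::Operation (such as the
// GEMM wrappers above). Only the virtual interface shown in this file is used.
#include <cstdint>
#include <vector>
#include <cuda_runtime.h>
#include "cutlass/library/library.h"

cutlass::Status run_operation(
    cutlass::library::Operation const *operation,
    void const *config,        // e.g. a GemmGroupedConfiguration
    void const *arguments,     // e.g. a GemmGroupedArguments
    cudaStream_t stream = nullptr) {

  // 1. Ask the operation whether it supports this problem.
  cutlass::Status status = operation->can_implement(config, arguments);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // 2. Allocate host and device workspaces of the sizes it reports.
  std::vector<char> host_workspace(operation->get_host_workspace_size(config));

  void *device_workspace = nullptr;
  uint64_t device_size = operation->get_device_workspace_size(config, arguments);
  if (device_size) {
    cudaMalloc(&device_workspace, device_size);
  }

  // 3. Initialize: placement-constructs the underlying Operator in host workspace.
  status = operation->initialize(config, host_workspace.data(), device_workspace, stream);

  // 4. Launch; the same workspaces can be reused for subsequent runs.
  if (status == cutlass::Status::kSuccess) {
    status = operation->run(arguments, host_workspace.data(), device_workspace, stream);
  }

  if (device_workspace) {
    cudaFree(device_workspace);
  }
  return status;
}
```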
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines operations for all TRMM operation kinds in CUTLASS Library.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/trmm.h"
#include "cutlass/gemm/kernel/default_trmm_universal.h"
#include "cutlass/gemm/kernel/trmm_universal.h"
#include "cutlass/library/library.h"
#include "library_internal.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class TrmmOperationBase : public Operation {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
static SideMode const kSideMode = Operator::kSideMode;
static FillMode const kFillMode = Operator::kFillMode;
static DiagType const kDiagType = Operator::kDiagType;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
protected:
///
TrmmDescription description_;
public:
/// Constructor
TrmmOperationBase(char const *name = "unknown_trmm") {
description_.name = name;
description_.provider = Provider::kCUTLASS;
description_.kind = OperationKind::kTrmm;
description_.trmm_kind = TrmmKind::kUniversal;
description_.side_mode = kSideMode;
description_.fill_mode = kFillMode;
description_.diag_type = kDiagType;
description_.tile_description.threadblock_shape = make_Coord(
Operator::ThreadblockShape::kM,
Operator::ThreadblockShape::kN,
Operator::ThreadblockShape::kK);
description_.tile_description.threadblock_stages = Operator::kStages;
description_.tile_description.warp_count = make_Coord(
Operator::TrmmKernel::WarpCount::kM,
Operator::TrmmKernel::WarpCount::kN,
Operator::TrmmKernel::WarpCount::kK);
description_.tile_description.math_instruction.instruction_shape = make_Coord(
Operator::InstructionShape::kM,
Operator::InstructionShape::kN,
Operator::InstructionShape::kK);
description_.tile_description.math_instruction.element_accumulator =
NumericTypeMap<ElementAccumulator>::kId;
description_.tile_description.math_instruction.opcode_class =
OpcodeClassMap<typename Operator::OperatorClass>::kId;
description_.tile_description.math_instruction.math_operation =
MathOperationMap<typename Operator::Operator>::kId;
description_.tile_description.minimum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMin;
description_.tile_description.maximum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMax;
description_.A = make_TensorDescription<ElementA, LayoutA>(Operator::kAlignmentA);
description_.B = make_TensorDescription<ElementB, LayoutB>(Operator::kAlignmentB);
description_.D = make_TensorDescription<ElementC, LayoutC>(Operator::kAlignmentC);
description_.element_epilogue = NumericTypeMap<ElementCompute>::kId;
description_.split_k_mode = SplitKMode::kNone;
description_.transform_A = ComplexTransformMap<Operator::kTransformA>::kId;
}
/// Returns the description of the TRMM operation
virtual OperationDescription const & description() const {
return description_;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class TrmmOperation : public TrmmOperationBase<Operator_> {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
static SideMode const kSideMode = Operator::kSideMode;
static FillMode const kFillMode = Operator::kFillMode;
static DiagType const kDiagType = Operator::kDiagType;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
TrmmOperation(char const *name = "unknown_trmm"):
TrmmOperationBase<Operator_>(name) {
this->description_.trmm_kind = TrmmKind::kUniversal;
}
protected:
/// Constructs the arguments structure given the configuration and arguments
static Status construct_arguments_(
OperatorArguments &operator_args,
TrmmConfiguration const *configuration) {
//operator_args.mode = configuration->mode;
operator_args.problem_size = configuration->problem_size;
operator_args.batch_count = configuration->batch_count;
operator_args.lda = int(configuration->lda);
operator_args.ldb = int(configuration->ldb);
operator_args.ldd = int(configuration->ldd);
return Status::kSuccess;
}
/// Updates the arguments structure with the pointers and scalars supplied at runtime
static Status update_arguments_(
OperatorArguments &operator_args,
TrmmArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<ElementCompute const *>(arguments->alpha),
*static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::EpilogueOutputOp::Params params(
static_cast<ElementCompute const *>(arguments->alpha),
static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else {
return Status::kErrorInvalidProblem;
}
// update arguments
operator_args.ptr_A = arguments->A;
operator_args.ptr_B = arguments->B;
operator_args.batch_stride_A = arguments->batch_stride_A;
operator_args.batch_stride_B = arguments->batch_stride_B;
operator_args.ptr_D = arguments->D;
operator_args.batch_stride_D = arguments->batch_stride_D;
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
TrmmConfiguration const *configuration =
static_cast<TrmmConfiguration const *>(configuration_ptr);
TrmmArguments const *arguments =
static_cast<TrmmArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<TrmmConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
uint64_t size = Operator::get_workspace_size(args);
return size;
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<TrmmConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
status = op->initialize(args, device_workspace, stream);
return status;
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<TrmmArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
bool need_swapped_matrices = (kSideMode == SideMode::kLeft &&
std::is_same<typename Operator::LayoutC, layout::ColumnMajor>::value) ||
(kSideMode == SideMode::kRight &&
std::is_same<typename Operator::LayoutC, layout::RowMajor>::value);
if (need_swapped_matrices) {
status = op->update(args.swapped_matrices(), device_workspace);
} else {
status = op->update(args, device_workspace);
}
if (status != Status::kSuccess) {
return status;
}
status = op->run(stream);
return status;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/src/trmm_operation.h/0 | {
"file_path": "tools/library/src/trmm_operation.h",
"repo_id": "tools",
"token_count": 3752
} | 57 |
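The distinctive detail in `TrmmOperation::run()` above is the operand swap driven by the side mode and the output layout. The following is a small illustrative sketch of that same predicate evaluated at compile time; the `needs_swapped_matrices` helper is hypothetical, and only the condition itself mirrors the code above.

```cpp
#include <type_traits>
#include "cutlass/blas3.h"          // cutlass::SideMode
#include "cutlass/layout/matrix.h"  // cutlass::layout::ColumnMajor / RowMajor

// Mirrors the need_swapped_matrices condition in TrmmOperation::run():
// a left-side TRMM with a column-major output (or a right-side TRMM with a
// row-major output) is executed with the A and B operands exchanged.
template <cutlass::SideMode kSideMode, typename LayoutC>
constexpr bool needs_swapped_matrices() {
  return (kSideMode == cutlass::SideMode::kLeft &&
          std::is_same<LayoutC, cutlass::layout::ColumnMajor>::value) ||
         (kSideMode == cutlass::SideMode::kRight &&
          std::is_same<LayoutC, cutlass::layout::RowMajor>::value);
}

static_assert(needs_swapped_matrices<cutlass::SideMode::kLeft,
                                     cutlass::layout::ColumnMajor>(),
              "left-side TRMM with column-major output runs with swapped operands");
```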
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines a class that reports performance results gathered during profiling
*/
#pragma once
#include <vector>
#include <fstream>
// CUTLASS Profiler includes
#include "options.h"
#include "enumerated_types.h"
#include "performance_result.h"
// CUTLASS Library includes
#include "cutlass/library/library.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
class PerformanceReport {
private:
/// Reference to options
Options const &options_;
/// Operation kind
library::OperationKind op_kind_;
/// Operation file name containing performance report of op_kind
std::string op_file_name_;
/// Output file containing results
std::ofstream output_file_;
/// Operation file name containing junit performance report of op_kind
std::string op_junit_file_name_;
/// Output file containing junit results
std::ofstream junit_output_file_;
/// Flag indicating the performance report is valid
bool good_;
/// Vector of argument names
std::vector<std::string> argument_names_;
/// Counter uniquely identifying problem within the report
size_t problem_index_;
/// Collection of all results
PerformanceResultVector concatenated_results_;
public:
PerformanceReport(Options const &options, std::vector<std::string> const &argument_names, library::OperationKind const &op_kind);
~PerformanceReport();
bool good() const { return good_; }
void next_problem();
void append_result(PerformanceResult result);
void sort_results(PerformanceResultVector &results);
void append_results(PerformanceResultVector const &results);
public:
/// Prints the CSV header
std::ostream & print_csv_header_(std::ostream &out);
/// Prints a single result as a CSV row
std::ostream & print_result_csv_(std::ostream &out, PerformanceResult const &result);
/// @defgroup jUnit Result Generation
/// Functions related to generation of the jUnit results
/// @{
std::ostream & print_junit_header_(std::ostream &out);
std::ostream & print_junit_result_(std::ostream &out, PerformanceResult const &result);
std::ostream & print_junit_footer_(std::ostream &out);
/// @}
/// Prints the result in human readable form
std::ostream & print_result_pretty_(
std::ostream &out,
PerformanceResult const &result,
bool use_shell_coloring = true);
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
| tools/profiler/include/cutlass/profiler/performance_report.h/0 | {
"file_path": "tools/profiler/include/cutlass/profiler/performance_report.h",
"repo_id": "tools",
"token_count": 1135
} | 58 |
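A hedged sketch of how the profiler might feed results into this class, using only the members declared above. The `Options`, argument-name list, and `PerformanceResult` objects are assumed to be built elsewhere by the profiler, the choice of `OperationKind::kGemm` is illustrative, and the output files are presumably finalized when the report is destroyed.

```cpp
#include <string>
#include <vector>
#include "cutlass/profiler/performance_report.h"

void report_results(
    cutlass::profiler::Options const &options,
    std::vector<std::string> const &argument_names,
    cutlass::profiler::PerformanceResultVector const &results) {

  using cutlass::profiler::PerformanceReport;
  using cutlass::profiler::PerformanceResult;

  PerformanceReport report(options, argument_names, cutlass::library::OperationKind::kGemm);
  if (!report.good()) {
    return;  // output files could not be opened
  }

  for (PerformanceResult const &result : results) {
    report.append_result(result);  // one row per profiled problem
    report.next_problem();
  }
  // Output is presumably flushed when `report` goes out of scope.
}
```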
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Provides conversions between profiler enumerated types and their string representations.
*/
#include "cutlass/profiler/enumerated_types.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
ExecutionMode enumerant;
}
ExecutionMode_enumerants[] = {
{"profile", "Profile", ExecutionMode::kProfile},
{"dry_run", "Dry run", ExecutionMode::kDryRun},
{"dry", "dry run", ExecutionMode::kDryRun},
{"trace", "Trace", ExecutionMode::kTrace},
{"enumerate", "Enumerate", ExecutionMode::kEnumerate}
};
/// Converts an ExecutionMode enumerant to a string
char const *to_string(ExecutionMode mode, bool pretty) {
for (auto const & possible : ExecutionMode_enumerants) {
if (mode == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Parses an ExecutionMode enumerant from a string
template <>
ExecutionMode from_string<ExecutionMode>(std::string const &str) {
for (auto const & possible : ExecutionMode_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return ExecutionMode::kInvalid;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
AlgorithmMode enumerant;
}
AlgorithmMode_enumerants[] = {
{"matching", "Matching", AlgorithmMode::kMatching},
{"best", "Best", AlgorithmMode::kBest},
{"default", "Default", AlgorithmMode::kDefault}
};
/// Converts an AlgorithmMode enumerant to a string
char const *to_string(AlgorithmMode mode, bool pretty) {
for (auto const & possible : AlgorithmMode_enumerants) {
if (mode == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Parses an AlgorithmMode enumerant from a string
template <>
AlgorithmMode from_string<AlgorithmMode>(std::string const &str) {
for (auto const & possible : AlgorithmMode_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return AlgorithmMode::kInvalid;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
Disposition enumerant;
}
Disposition_enumerants[] = {
{"passed", "Passed", Disposition::kPassed},
{"failed", "Failed", Disposition::kFailed},
{"not_run", "Not run", Disposition::kNotRun},
{"not_verified", "Not verified", Disposition::kNotVerified},
{"invalid_problem", "Invalid problem", Disposition::kInvalidProblem},
{"not_supported", "Not supported", Disposition::kNotSupported},
{"incorrect", "Incorrect", Disposition::kIncorrect}
};
/// Converts a Disposition enumerant to a string
char const *to_string(Disposition disposition, bool pretty) {
for (auto const & possible : Disposition_enumerants) {
if (disposition == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Parses a Disposition enumerant from a string
template <>
Disposition from_string<Disposition>(std::string const &str) {
for (auto const & possible : Disposition_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return Disposition::kInvalid;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
SaveWorkspace enumerant;
}
SaveWorkspace_enumerants[] = {
{"never", "Never", SaveWorkspace::kNever},
{"incorrect", "Incorrect", SaveWorkspace::kIncorrect},
{"always", "Always", SaveWorkspace::kAlways}
};
/// Converts a SaveWorkspace enumerant to a string
char const *to_string(SaveWorkspace save_option, bool pretty) {
for (auto const & possible : SaveWorkspace_enumerants) {
if (save_option == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Parses a SaveWorkspace enumerant from a string
template <>
SaveWorkspace from_string<SaveWorkspace>(std::string const &str) {
for (auto const & possible : SaveWorkspace_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return SaveWorkspace::kInvalid;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
ArgumentTypeID enumerant;
}
ArgumentTypeID_enumerants[] = {
{"scalar", "Scalar", ArgumentTypeID::kScalar},
{"int", "Integer", ArgumentTypeID::kInteger},
{"tensor", "Tensor", ArgumentTypeID::kTensor},
{"batched_tensor", "BatchedTensor", ArgumentTypeID::kBatchedTensor},
{"struct", "Struct", ArgumentTypeID::kStructure},
{"enum", "Enumerated type", ArgumentTypeID::kEnumerated}
};
/// Converts an ArgumentTypeID enumerant to a string
char const *to_string(ArgumentTypeID type, bool pretty) {
for (auto const & possible : ArgumentTypeID_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Parses an ArgumentTypeID enumerant from a string
template <>
ArgumentTypeID from_string<ArgumentTypeID>(std::string const &str) {
for (auto const & possible : ArgumentTypeID_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return ArgumentTypeID::kInvalid;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/src/enumerated_types.cpp/0 | {
"file_path": "tools/profiler/src/enumerated_types.cpp",
"repo_id": "tools",
"token_count": 2582
} | 59 |
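A short round-trip sketch for the helpers defined above, assuming the declarations live in `cutlass/profiler/enumerated_types.h` as included by this translation unit.

```cpp
#include <cassert>
#include <string>
#include "cutlass/profiler/enumerated_types.h"

void enumerated_types_round_trip() {
  using namespace cutlass::profiler;

  // Both the text form ("dry_run") and the pretty form ("Dry run") parse.
  ExecutionMode mode = from_string<ExecutionMode>("dry_run");
  assert(mode == ExecutionMode::kDryRun);

  // to_string returns the first matching table entry for the enumerant.
  assert(std::string(to_string(mode, /*pretty=*/false)) == "dry_run");

  // Unknown strings map to the kInvalid enumerant rather than throwing.
  assert(from_string<Disposition>("bogus") == Disposition::kInvalid);
}
```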
/******************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
/**
* \file
* Utility for parsing command line arguments
*/
#include <iostream>
#include <limits>
#include <sstream>
#include <string>
#include <vector>
#include <cuda_runtime.h>
#include "cutlass/cutlass.h"
namespace cutlass {
/******************************************************************************
* command_line
******************************************************************************/
/**
* Utility for parsing command line arguments
*/
struct CommandLine {
std::vector<std::string> keys;
std::vector<std::string> values;
std::vector<std::string> args;
/**
* Constructor
*/
CommandLine(int argc, const char** argv) {
using namespace std;
for (int i = 1; i < argc; i++) {
string arg = argv[i];
if ((arg[0] != '-') || (arg[1] != '-')) {
args.push_back(arg);
continue;
}
string::size_type pos;
string key, val;
if ((pos = arg.find('=')) == string::npos) {
key = string(arg, 2, arg.length() - 2);
val = "";
} else {
key = string(arg, 2, pos - 2);
val = string(arg, pos + 1, arg.length() - 1);
}
keys.push_back(key);
values.push_back(val);
}
}
/**
* Checks whether a flag "--<flag>" is present in the commandline
*/
bool check_cmd_line_flag(const char* arg_name) const {
using namespace std;
for (int i = 0; i < int(keys.size()); ++i) {
if (keys[i] == string(arg_name)) return true;
}
return false;
}
/**
* Returns number of naked (non-flag and non-key-value) commandline parameters
*/
size_t num_naked_args() const {
return args.size();
}
/**
* Print naked (non-flag and non-key-value) commandline parameters
*/
void print_naked_args(std::ostream &out) const {
for (auto arg : args) {
out << " " << arg <<"\n";
}
}
/**
* Returns the commandline parameter for a given index (not including flags)
*/
template <typename value_t>
void get_cmd_line_argument(size_t index, value_t& val) const {
using namespace std;
if (index < args.size()) {
istringstream str_stream(args[index]);
str_stream >> val;
}
}
/**
* Obtains the boolean value specified for a given commandline parameter --<flag>=<bool>
*/
void get_cmd_line_argument(const char* arg_name, bool& val, bool _default) const {
val = _default;
if (check_cmd_line_flag(arg_name)) {
std::string value;
get_cmd_line_argument(arg_name, value);
val = !(value == "0" || value == "false");
}
}
/**
* Obtains the value specified for a given commandline parameter --<flag>=<value>
*/
template <typename value_t>
void get_cmd_line_argument(const char* arg_name,
value_t& val) const {
get_cmd_line_argument(arg_name, val, val);
}
/**
* Obtains the value specified for a given commandline parameter --<flag>=<value>
*/
template <typename value_t>
void get_cmd_line_argument(const char* arg_name,
value_t& val,
value_t const& _default) const {
using namespace std;
val = _default;
for (int i = 0; i < int(keys.size()); ++i) {
if (keys[i] == string(arg_name)) {
istringstream str_stream(values[i]);
str_stream >> val;
}
}
}
/**
* Returns the values specified for a given commandline parameter --<flag>=<value>,<value>*
*/
template <typename value_t>
void get_cmd_line_arguments(const char* arg_name,
std::vector<value_t>& vals,
char sep = ',') const {
using namespace std;
if (check_cmd_line_flag(arg_name)) {
// Clear any default values
vals.clear();
// Recover from multi-value string
for (size_t i = 0; i < keys.size(); ++i) {
if (keys[i] == string(arg_name)) {
string val_string(values[i]);
separate_string(val_string, vals, sep);
}
}
}
}
/**
* Returns the values specified for a given commandline parameter
* --<flag>=<value>,<value_start:value_end>*
*/
void get_cmd_line_argument_pairs(const char* arg_name,
std::vector<std::pair<std::string, std::string> >& tokens,
char delim = ',',
char sep = ':') const {
if (check_cmd_line_flag(arg_name)) {
std::string value;
get_cmd_line_argument(arg_name, value);
tokenize(tokens, value, delim, sep);
}
}
/**
* Returns a list of ranges specified for a given commandline parameter
* --<flag>=<key:value>,<key:value>*
*/
void get_cmd_line_argument_ranges(const char* arg_name,
std::vector<std::vector<std::string> >& vals,
char delim = ',',
char sep = ':') const {
std::vector<std::string> ranges;
get_cmd_line_arguments(arg_name, ranges, delim);
for (std::vector<std::string>::const_iterator range = ranges.begin();
range != ranges.end(); ++range) {
std::vector<std::string> range_vals;
separate_string(*range, range_vals, sep);
vals.push_back(range_vals);
}
}
/**
* The number of pairs parsed
*/
int parsed_argc() const { return (int)keys.size(); }
//-------------------------------------------------------------------------
// Utility functions
//-------------------------------------------------------------------------
/// Tokenizes a comma-delimited list of string pairs delimited by ':'
static void tokenize(std::vector<std::pair<std::string, std::string> >& tokens,
std::string const& str,
char delim = ',',
char sep = ':') {
// Home-built to avoid Boost dependency
size_t s_idx = 0;
size_t d_idx = std::string::npos;
while (s_idx < str.size()) {
d_idx = str.find_first_of(delim, s_idx);
size_t end_idx = (d_idx != std::string::npos ? d_idx : str.size());
size_t sep_idx = str.find_first_of(sep, s_idx);
size_t offset = 1;
if (sep_idx == std::string::npos || sep_idx >= end_idx) {
sep_idx = end_idx;
offset = 0;
}
std::pair<std::string, std::string> item(
str.substr(s_idx, sep_idx - s_idx),
str.substr(sep_idx + offset, end_idx - sep_idx - offset));
tokens.push_back(item);
s_idx = end_idx + 1;
}
}
/// Tokenizes a comma-delimited list of ':'-delimited string pairs and keeps only the first element of each pair
static void tokenize(std::vector<std::string>& tokens,
std::string const& str,
char delim = ',',
char sep = ':') {
typedef std::vector<std::pair<std::string, std::string> > TokenVector;
typedef TokenVector::const_iterator token_iterator;
std::vector<std::pair<std::string, std::string> > token_pairs;
tokenize(token_pairs, str, delim, sep);
for (token_iterator tok = token_pairs.begin(); tok != token_pairs.end(); ++tok) {
tokens.push_back(tok->first);
}
}
template <typename value_t>
static void separate_string(std::string const& str,
std::vector<value_t>& vals,
char sep = ',') {
std::istringstream str_stream(str);
std::string::size_type old_pos = 0;
std::string::size_type new_pos = 0;
// Iterate <sep>-delimited values
value_t val;
while ((new_pos = str.find(sep, old_pos)) != std::string::npos) {
if (new_pos != old_pos) {
str_stream.width(new_pos - old_pos);
str_stream >> val;
vals.push_back(val);
}
// skip over delimiter
str_stream.ignore(1);
old_pos = new_pos + 1;
}
// Read last value
str_stream >> val;
vals.push_back(val);
}
};
} // namespace cutlass
| tools/util/include/cutlass/util/command_line.h/0 | {
"file_path": "tools/util/include/cutlass/util/command_line.h",
"repo_id": "tools",
"token_count": 3938
} | 60 |
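A usage sketch for `cutlass::CommandLine`; the flag names (`--m`, `--alpha`, `--modes`, `--verbose`) and the trailing input-file argument are made up purely for illustration.

```cpp
// Example invocation:
//   ./tool --m=1024 --alpha=1.5 --modes=trace,profile --verbose input.bin
#include <iostream>
#include <string>
#include <vector>
#include "cutlass/util/command_line.h"

int main(int argc, char const **argv) {
  cutlass::CommandLine cmd(argc, argv);

  int m = 128;                                 // default kept if --m is absent
  cmd.get_cmd_line_argument("m", m);

  float alpha;
  cmd.get_cmd_line_argument("alpha", alpha, 1.0f);

  bool verbose;
  cmd.get_cmd_line_argument("verbose", verbose, false);  // --verbose or --verbose=true

  std::vector<std::string> modes;
  cmd.get_cmd_line_arguments("modes", modes);             // comma-separated list

  std::string input_file;
  cmd.get_cmd_line_argument(0, input_file);               // first naked argument

  std::cout << "m=" << m << " alpha=" << alpha
            << " verbose=" << verbose
            << " modes=" << modes.size()
            << " input=" << input_file << "\n";
  return 0;
}
```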
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cuda.h>
#include <cute/util/debug.hpp>
namespace cute
{
void
device_init(int device_id, bool quiet = false)
{
cudaDeviceProp device_prop;
std::size_t device_free_physmem;
std::size_t device_total_physmem;
CUTE_CHECK_ERROR(cudaSetDevice(device_id));
CUTE_CHECK_ERROR(cudaMemGetInfo(&device_free_physmem, &device_total_physmem));
CUTE_CHECK_ERROR(cudaGetDeviceProperties(&device_prop, device_id));
if (device_prop.major < 1) {
fprintf(stderr, "Device does not support CUDA.\n");
exit(1);
}
//float device_giga_bandwidth = float(device_prop.memoryBusWidth) * device_prop.memoryClockRate * 2 / 8 / 1000 / 1000;
if (!quiet) {
printf("Using device %d: %s (SM%d, %d SMs)\n",
device_id, device_prop.name,
device_prop.major * 10 + device_prop.minor,
device_prop.multiProcessorCount);
fflush(stdout);
}
}
/**
* Convert the SM version (e.g. v7.0, v7.5) to the physical number of cores.
*/
inline int
_ConvertSMVer2Cores(int major, int minor)
{
// Defines for GPU Architecture types (using the SM version to determine
// the # of cores per SM
typedef struct {
int SM; // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192},
{0x32, 192},
{0x35, 192},
{0x37, 192},
{0x50, 128},
{0x52, 128},
{0x53, 128},
{0x60, 64},
{0x61, 128},
{0x62, 128},
{0x70, 64},
{0x72, 64},
{0x75, 64},
{-1, -1}};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
  // If we don't find the SM version in the table, default to the last known
  // configuration so execution can proceed
printf("MapSMtoCores for SM %d.%d is undefined."
" Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
} // end namespace cute
| tools/util/include/cutlass/util/helper_cuda.hpp/0 | {
"file_path": "tools/util/include/cutlass/util/helper_cuda.hpp",
"repo_id": "tools",
"token_count": 1388
} | 61 |
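A minimal sketch that uses the two helpers above to pick a device and estimate its core count. Note that the table in `_ConvertSMVer2Cores` only covers architectures through SM 7.5, so newer GPUs fall back to the last entry with a warning.

```cpp
#include <cstdio>
#include <cuda_runtime.h>
#include "cutlass/util/helper_cuda.hpp"

int main() {
  int device_id = 0;
  cute::device_init(device_id);  // prints "Using device 0: ..." and checks CUDA errors

  cudaDeviceProp props;
  cudaGetDeviceProperties(&props, device_id);

  int cores_per_sm = cute::_ConvertSMVer2Cores(props.major, props.minor);
  std::printf("Approx. CUDA cores: %d\n", cores_per_sm * props.multiProcessorCount);
  return 0;
}
```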
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <curand_kernel.h>
#include "cutlass/cutlass.h"
namespace cutlass {
namespace reference {
namespace device {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize tensor to uniform random distribution
template <typename T>
__global__ void TensorInitializeUniform(
Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) {
__shared__ curandState_t rng_state[1024];
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
curand_init(seed, gtid, 0, &rng_state[threadIdx.x]);
int c_idx = blockIdx.x * blockDim.x + threadIdx.x;
int s_idx = blockIdx.y * blockDim.x;
tensor += s_idx * ldm + c_idx;
for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) {
if (s_idx < dim_strided && c_idx < dim_contiguous) {
double range = dist.uniform.max - dist.uniform.min;
double rnd = curand_uniform(&rng_state[threadIdx.x]);
rnd = dist.uniform.min + range * rnd;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
if (dist.int_scale >= 0) {
rnd = double(int(rnd * double(1 << dist.int_scale)));
*tensor = T(rnd / double(1 << dist.int_scale));
} else {
*tensor = T(rnd);
}
tensor += ldm;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize tensor to a Gaussian (normal) distribution
template <typename T>
__global__ void TensorInitializeGaussian(
Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) {
__shared__ curandState_t rng_state[1024];
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
curand_init(seed, gtid, 0, &rng_state[threadIdx.x]);
int c_idx = blockIdx.x * blockDim.x + threadIdx.x;
int s_idx = blockIdx.y * blockDim.x;
tensor += s_idx * ldm + c_idx;
for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) {
if (s_idx < dim_strided && c_idx < dim_contiguous) {
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
double rnd = curand_normal(&rng_state[threadIdx.x]);
rnd = dist.gaussian.mean + dist.gaussian.stddev * rnd;
if (dist.int_scale >= 0) {
rnd = double(int(rnd * double(1 << dist.int_scale)));
*tensor = T(rnd / double(1 << dist.int_scale));
} else {
*tensor = T(rnd);
}
      // Advance to the next strided element (matches TensorInitializeUniform)
      tensor += ldm;
    }
}
}
/// Kernel to initialize tensor to a linear combination of its row and column indices
template <typename T>
__global__ void TensorInitializeLinear(
Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) {
__shared__ curandState_t rng_state[1024];
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
curand_init(seed, gtid, 0, &rng_state[threadIdx.x]);
int c_idx = blockIdx.x * blockDim.x + threadIdx.x;
int s_idx = blockIdx.y * blockDim.x;
tensor += s_idx * ldm + c_idx;
for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) {
if (s_idx < dim_strided && c_idx < dim_contiguous) {
*tensor =
dist.linear.offset + dist.linear.delta_row * c_idx + dist.linear.delta_column * s_idx;
      // Advance to the next strided element (matches TensorInitializeUniform)
      tensor += ldm;
    }
}
}
/// Kernel to initialize tensor to an identity matrix
template <typename T>
__global__ void TensorInitializeIdentity(
Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) {
__shared__ curandState_t rng_state[1024];
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
curand_init(seed, gtid, 0, &rng_state[threadIdx.x]);
int c_idx = blockIdx.x * blockDim.x + threadIdx.x;
int s_idx = blockIdx.y * blockDim.x;
tensor += s_idx * ldm + c_idx;
for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) {
if (s_idx < dim_strided && c_idx < dim_contiguous) {
*tensor = (c_idx == s_idx ? T(1) : T(0));
      // Advance to the next strided element (matches TensorInitializeUniform)
      tensor += ldm;
    }
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace device
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/device/kernel/tensor_elementwise.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/device/kernel/tensor_elementwise.h",
"repo_id": "tools",
"token_count": 2183
} | 62 |
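A hedged launch sketch for `TensorInitializeUniform`. The grid/block mapping follows the indexing in the kernel body (each block of `kThreads` threads covers a `kThreads` x `kThreads` tile of the tensor), the `Distribution` fields set here are exactly the ones the kernel reads, and the `tensor_fill_random_uniform` wrapper itself is hypothetical.

```cpp
#include <cstdint>
#include <cuda_runtime.h>
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/device/kernel/tensor_elementwise.h"

template <typename T>
void tensor_fill_random_uniform(T *device_ptr, int dim_contiguous, int dim_strided,
                                int ldm, int64_t seed = 2024) {
  cutlass::Distribution dist;
  dist.uniform.min = -4.0;   // fields read directly by the kernel
  dist.uniform.max =  4.0;
  dist.int_scale   = -1;     // negative: keep full-precision values (see kernel)

  int const kThreads = 256;  // must not exceed the kernel's 1024-entry RNG state
  dim3 block(kThreads, 1, 1);
  dim3 grid((dim_contiguous + kThreads - 1) / kThreads,
            (dim_strided    + kThreads - 1) / kThreads,
            1);

  cutlass::reference::device::kernel::TensorInitializeUniform<T>
      <<<grid, block>>>(dist, seed, dim_contiguous, dim_strided, device_ptr, ldm);
}
```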
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for Rank 2k update in host-side code.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/arch/mma.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
namespace cutlass {
namespace reference {
namespace host {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a rank-2k update among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
FillMode FillModeC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_rank2k(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2,
"Tensors must be of rank 2");
static_assert(
FillModeC == FillMode::kLower ||
FillModeC == FillMode::kUpper,
"Fill Mode can either be Lower or Upper.");
using CompareOp = typename platform::conditional<(FillModeC == FillMode::kLower),
std::greater_equal<int>,
std::less_equal<int>>::type;
// Note: batch is ignored.
// Note: M is same as N for Rank 2k update
int const N = problem_size.n();
int const K = problem_size.k();
// Blocking is necessary to speed up the reference implementation
int const Nblock = 16;
ConvertOp convert_op;
InnerProductOp inner_product_op;
CompareOp compare_op;
for (int row_block = 0; row_block < N; row_block += Nblock) {
for (int col_block = 0; col_block < N; col_block += Nblock) {
ComputeType accum[Nblock][Nblock];
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Nblock; i++) {
accum[i][j] = initial_accum;
}
}
for (int k_block = 0; k_block < K; ++k_block) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Nblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < N && col < N && compare_op(row, col))
{
// A x B^T
ElementA a = tensor_a.at(MatrixCoord(row, k_block));
ElementB b_t = tensor_b.at(MatrixCoord(col, k_block));
ComputeType compute_a(cast_if_scalar<ComputeType>(a));
ComputeType compute_b_t(cast_if_scalar<ComputeType>(b_t));
accum[i][j] = inner_product_op(compute_a, compute_b_t, accum[i][j]);
// B x A^T
ElementB b = tensor_b.at(MatrixCoord(row, k_block));
ElementA a_t = tensor_a.at(MatrixCoord(col, k_block));
ComputeType compute_b(cast_if_scalar<ComputeType>(b));
ComputeType compute_a_t(cast_if_scalar<ComputeType>(a_t));
accum[i][j] = inner_product_op(compute_b, compute_a_t, accum[i][j]);
}
}
}
}
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Nblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < N && col < N &&
( (FillModeC == FillMode::kLower && row >= col) ||
(FillModeC == FillMode::kUpper && row <= col) )
) {
tensor_d.at(coord) = convert_op(
alpha * ScalarType(accum[i][j]) +
beta * ScalarType(tensor_c.at(coord)));
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general Rank 2k update (tensors of rank=2) pointed to by TensorRef
/// objects.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
FillMode FillModeC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_rank2k(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum) {
compute_rank2k<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, FillModeC,
ScalarType, ComputeType, InnerProductOp, ConvertOp>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_c,
initial_accum);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
FillMode FillModeC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = cutlass::arch::OpMultiplyAdd
>
struct Rank2K;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
template <typename ElementA, typename LayoutA,
typename ElementB, typename LayoutB,
typename ElementC, typename LayoutC, FillMode FillModeC,
typename ScalarType, typename ComputeType>
struct Rank2K<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, FillModeC, ScalarType,
ComputeType, arch::OpMultiplyAdd> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_rank2k<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, FillModeC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_rank2k<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, FillModeC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
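// A minimal host-side usage sketch of the reference functor above. This is illustrative only:
// the element types, layouts, extents, and tensor names (tensor_a, tensor_b, tensor_c) are
// assumptions, and the caller provides valid TensorRefs (for example via
// cutlass::HostTensor<>::host_ref(), which is not a dependency of this header).
//
//   using Rank2KRef = cutlass::reference::host::Rank2K<
//       float, cutlass::layout::ColumnMajor,   // A
//       float, cutlass::layout::ColumnMajor,   // B
//       float, cutlass::layout::ColumnMajor,   // C / D
//       cutlass::FillMode::kLower,
//       float,                                 // ScalarType (alpha/beta)
//       float>;                                // ComputeType (accumulator)
//
//   cutlass::gemm::GemmCoord problem(n, n, k);   // M == N for a rank-2k update
//   Rank2KRef rank2k_ref;
//   rank2k_ref(problem, alpha,
//              tensor_a.host_ref(), tensor_b.host_ref(),
//              beta, tensor_c.host_ref());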
} // namespace host
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/host/rank_2k.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/rank_2k.h",
"repo_id": "tools",
"token_count": 3649
} | 63 |
cff-version: 1.2.0
title: CUTLASS
message: >-
If you use this software, please cite using the
following metadata.
type: software
authors:
- given-names: Vijay
family-names: Thakkar
email: [email protected]
affiliation: NVIDIA
- given-names: Pradeep
family-names: Ramani
email: [email protected]
affiliation: NVIDIA
- given-names: Cris
family-names: Cecka
email: [email protected]
affiliation: NVIDIA
- given-names: Aniket
family-names: Shivam
email: [email protected]
affiliation: NVIDIA
- given-names: Honghao
family-names: Lu
email: [email protected]
affiliation: NVIDIA
- given-names: Ethan
family-names: Yan
email: [email protected]
affiliation: NVIDIA
- given-names: Jack
family-names: Kosaian
email: [email protected]
affiliation: NVIDIA
- given-names: Mark
family-names: Hoemmen
email: [email protected]
affiliation: NVIDIA
- given-names: Haicheng
family-names: Wu
email: [email protected]
affiliation: NVIDIA
- given-names: Andrew
family-names: Kerr
email: [email protected]
affiliation: NVIDIA
- given-names: Matt
family-names: Nicely
email: [email protected]
affiliation: NVIDIA
- given-names: Duane
family-names: Merrill
email: [email protected]
affiliation: NVIDIA
- given-names: Dustyn
family-names: Blasig
email: [email protected]
affiliation: NVIDIA
- given-names: Fengqi
family-names: Qiao
email: [email protected]
affiliation: NVIDIA
- given-names: Piotr
family-names: Majcher
email: [email protected]
affiliation: NVIDIA
- given-names: Paul
family-names: Springer
email: [email protected]
affiliation: NVIDIA
- given-names: Markus
family-names: Hohnerbach
affiliation: NVIDIA
email: [email protected]
- given-names: Jin
family-names: Wang
email: [email protected]
affiliation: NVIDIA
- given-names: Manish
family-names: Gupta
affiliation: Google
email: [email protected]
repository-code: 'https://github.com/NVIDIA/cutlass'
abstract: >-
CUTLASS is a collection of CUDA C++ template
abstractions for implementing high-performance
matrix-multiplication (GEMM) and related
computations at all levels and scales within CUDA.
It incorporates strategies for hierarchical
decomposition and data movement similar to those
used to implement cuBLAS and cuDNN. CUTLASS
decomposes these "moving parts" into reusable,
modular software components abstracted by C++
template classes. These thread-wide, warp-wide,
block-wide, and device-wide primitives can be
specialized and tuned via custom tiling sizes, data
types, and other algorithmic policy. The resulting
flexibility simplifies their use as building blocks
within custom kernels and applications.
keywords:
  - cutlass
  - tensor cores
  - cuda
  - cute
  - nvidia
  - gpu
  - linear algebra
  - matrix computations
license: BSD-3-Clause
license-url: https://github.com/NVIDIA/cutlass/blob/v3.0.0/LICENSE.txt
version: '3.0.0'
date-released: '2023-01-23'
identifiers:
- type: url
value: "https://github.com/NVIDIA/cutlass/tree/v3.0.0"
description: The GitHub release URL of tag 3.0.0
| CITATION.cff/0 | {
"file_path": "CITATION.cff",
"repo_id": "CITATION.cff",
"token_count": 1162
} | 0 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Containers for running grouped back-to-back GEMMs
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/util/device_memory.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/device/tensor_relu.h"
#include "reference/device/tensor_scale_bias.h"
#include "helper.h"
#define CHECK_GT(val1, val2) \
if((val1) <= (val2)) \
std::cerr << __FILE__ << " " << __LINE__ << ": CHECK_GT failed\n";
#define CHECK_TRUE(val) \
if(!(val)) \
std::cerr << __FILE__ << " " << __LINE__ << ": CHECK_TRUE failed\n";
////////////////////////////////////////////////////////////////////////////////
template <typename B2bGemm_>
struct B2bFusedGroupedGemmRun
{
using B2bGemm = B2bGemm_;
using ElementAccumulator = typename B2bGemm::ElementAccumulator;
using ElementCompute = typename B2bGemm::BaseKernel::Epilogue::OutputOp::ElementCompute;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
cutlass::Distribution::Kind init_Scale;
cutlass::Distribution::Kind init_Bias;
uint64_t seed;
//
// Methods
//
B2bFusedGroupedGemmRun(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_Scale_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_Bias_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_),
init_Scale(init_Scale_), init_Bias(init_Bias_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
cutlass::reference::host::TensorFillRandomUniform(
view, seed, 1, -1, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else if (dist_kind == cutlass::Distribution::AllZeros) {
cutlass::reference::host::TensorFill(view, Element(0));
}
else if (dist_kind == cutlass::Distribution::AllOnes) {
cutlass::reference::host::TensorFill(view, Element(1));
}
else {
std::cerr << "Not implemented\n";
return false;
}
return true;
}
/// Executes one test
bool run(
std::vector<cutlass::gemm::GemmCoord> problem_sizes_0,
std::vector<cutlass::gemm::GemmCoord> problem_sizes_1,
ElementCompute alpha0 = ElementCompute(1),
ElementCompute beta0 = ElementCompute(0),
ElementCompute alpha1 = ElementCompute(1),
ElementCompute beta1 = ElementCompute(0),
bool relu = true,
int warm_ups = 1,
int runs = 100) {
using HostTensorA = cutlass::HostTensor<typename B2bGemm::ElementA, typename B2bGemm::LayoutA>;
using HostTensorB = cutlass::HostTensor<typename B2bGemm::ElementB, typename B2bGemm::LayoutB>;
using HostTensorC = cutlass::HostTensor<typename B2bGemm::ElementC, typename B2bGemm::LayoutC>;
using HostTensorScale = cutlass::HostTensor<ElementCompute, typename B2bGemm::LayoutC>;
using HostTensorZ = cutlass::HostTensor<ElementAccumulator, typename B2bGemm::LayoutC>;
using HostTensorBias = cutlass::HostTensor<ElementCompute, typename B2bGemm::LayoutC>;
int problem_count = (int)problem_sizes_0.size();
std::vector<HostTensorA> host_tensor_A0(problem_count);
std::vector<HostTensorB> host_tensor_B0(problem_count);
std::vector<HostTensorC> host_tensor_C0(problem_count);
std::vector<HostTensorScale> host_tensor_Scale0(problem_count);
std::vector<HostTensorScale> host_tensor_Bias0(problem_count);
std::vector<HostTensorB> host_tensor_B1(problem_count);
std::vector<HostTensorC> host_tensor_C1(problem_count);
std::vector<HostTensorBias> host_tensor_Bias1(problem_count);
std::vector<HostTensorC> host_tensor_D1(problem_count);
std::vector<HostTensorZ> host_tensor_Z(problem_count);
std::vector<HostTensorC> host_tensor_ref_D0(problem_count);
std::vector<HostTensorC> host_tensor_ref_D1(problem_count);
std::vector<typename HostTensorA::TensorRef> ref_A0(problem_count);
std::vector<typename HostTensorB::TensorRef> ref_B0(problem_count);
std::vector<typename HostTensorC::TensorRef> ref_C0(problem_count);
std::vector<typename HostTensorScale::TensorRef> ref_Scale0(problem_count);
std::vector<typename HostTensorScale::TensorRef> ref_Bias0(problem_count);
std::vector<typename HostTensorB::TensorRef> ref_B1(problem_count);
std::vector<typename HostTensorC::TensorRef> ref_C1(problem_count);
std::vector<typename HostTensorBias::TensorRef> ref_Bias1(problem_count);
std::vector<typename HostTensorC::TensorRef> ref_D1(problem_count);
std::vector<typename HostTensorZ::TensorRef> ref_Z(problem_count);
std::vector<typename HostTensorC::TensorRef> ref_ref_D0(problem_count);
std::vector<typename HostTensorC::TensorRef> ref_ref_D1(problem_count);
for (int i = 0; i < problem_count; ++i) {
//
// Allocate the GEMM workspace
//
auto problem_size_0 = problem_sizes_0[i];
auto problem_size_1 = problem_sizes_1[i];
host_tensor_A0.at(i) = HostTensorA(problem_size_0.mk());
host_tensor_B0.at(i) = HostTensorB(problem_size_0.kn());
host_tensor_C0.at(i) = HostTensorC(problem_size_0.mn());
if (alpha0 == ElementCompute(0)) //per-channel scale
host_tensor_Scale0.at(i) = HostTensorScale(typename HostTensorZ::Layout::TensorCoord{1, problem_size_0.n()});
host_tensor_Bias0.at(i) = HostTensorScale(typename HostTensorBias::Layout::TensorCoord{1, problem_size_0.n()});
host_tensor_Z.at(i) = HostTensorZ(problem_size_0.mn());
host_tensor_ref_D0.at(i) = HostTensorC(problem_size_0.mn());
host_tensor_B1.at(i) = HostTensorB(problem_size_1.kn());
host_tensor_C1.at(i) = HostTensorC(problem_size_1.mn());
host_tensor_Bias1.at(i) = HostTensorScale(typename HostTensorBias::Layout::TensorCoord{1, problem_size_1.n()});
host_tensor_D1.at(i) = HostTensorC(problem_size_1.mn());
host_tensor_ref_D1.at(i) = HostTensorC(problem_size_1.mn());
CHECK_TRUE(initialize_tensor(host_tensor_A0.at(i).host_view(), init_A, seed + 2019));
CHECK_TRUE(initialize_tensor(host_tensor_B0.at(i).host_view(), init_B, seed + 2018));
CHECK_TRUE(initialize_tensor(host_tensor_C0.at(i).host_view(), init_C, seed + 2017));
if (alpha0 == ElementCompute(0)) //per-channel scale
CHECK_TRUE(initialize_tensor(host_tensor_Scale0.at(i).host_view(), init_Scale, seed + 2014));
CHECK_TRUE(initialize_tensor(host_tensor_Bias0.at(i).host_view(), init_Bias, seed + 2013));
CHECK_TRUE(initialize_tensor(host_tensor_B1.at(i).host_view(), init_B, seed + 2016));
CHECK_TRUE(initialize_tensor(host_tensor_C1.at(i).host_view(), init_C, seed + 2015));
CHECK_TRUE(initialize_tensor(host_tensor_Bias1.at(i).host_view(), init_Bias, seed + 2012));
cutlass::reference::host::TensorFill(
host_tensor_D1.at(i).host_view());
cutlass::reference::host::TensorFill(
host_tensor_ref_D0.at(i).host_view());
cutlass::reference::host::TensorFill(
host_tensor_ref_D1.at(i).host_view());
host_tensor_A0.at(i).sync_device();
host_tensor_B0.at(i).sync_device();
host_tensor_C0.at(i).sync_device();
if (alpha0 == ElementCompute(0)) //per-channel scale
host_tensor_Scale0.at(i).sync_device();
host_tensor_Bias0.at(i).sync_device();
host_tensor_B1.at(i).sync_device();
host_tensor_C1.at(i).sync_device();
host_tensor_Bias1.at(i).sync_device();
host_tensor_D1.at(i).sync_device();
host_tensor_ref_D0.at(i).sync_device();
host_tensor_ref_D1.at(i).sync_device();
ref_A0.at(i) = (host_tensor_A0.at(i).device_ref());
ref_B0.at(i) = (host_tensor_B0.at(i).device_ref());
ref_C0.at(i) = (host_tensor_C0.at(i).device_ref());
if (alpha0 == ElementCompute(0)) //per-channel scale
ref_Scale0.at(i) = (host_tensor_Scale0.at(i).device_ref());
ref_Bias0.at(i) = (host_tensor_Bias0.at(i).device_ref());
ref_B1.at(i) = (host_tensor_B1.at(i).device_ref());
ref_C1.at(i) = {host_tensor_Bias1.at(i).device_data(), typename B2bGemm::LayoutC::Stride(0)};
ref_Bias1.at(i) = (host_tensor_Bias1.at(i).device_ref());
ref_D1.at(i) = (host_tensor_D1.at(i).device_ref());
ref_Z.at(i) = (host_tensor_Z.at(i).device_ref());
ref_ref_D0.at(i) = (host_tensor_ref_D0.at(i).device_ref());
ref_ref_D1.at(i) = (host_tensor_ref_D1.at(i).device_ref());
}
//
// Initialize the GEMM operator
//
cutlass::DeviceAllocation<typename HostTensorA::TensorRef> device_ref_A0(problem_count);
device_ref_A0.copy_from_host(ref_A0.data());
cutlass::DeviceAllocation<typename HostTensorB::TensorRef> device_ref_B0(problem_count);
device_ref_B0.copy_from_host(ref_B0.data());
cutlass::DeviceAllocation<typename HostTensorC::TensorRef> device_ref_C0(problem_count);
device_ref_C0.copy_from_host(ref_C0.data());
cutlass::DeviceAllocation<typename HostTensorScale::TensorRef> device_ref_Scale0(problem_count);
device_ref_Scale0.copy_from_host(ref_Scale0.data());
cutlass::DeviceAllocation<typename HostTensorScale::TensorRef> device_ref_Bias0(problem_count);
device_ref_Bias0.copy_from_host(ref_Bias0.data());
cutlass::DeviceAllocation<typename HostTensorB::TensorRef> device_ref_B1(problem_count);
device_ref_B1.copy_from_host(ref_B1.data());
cutlass::DeviceAllocation<typename HostTensorC::TensorRef> device_ref_C1(problem_count);
device_ref_C1.copy_from_host(ref_C1.data());
cutlass::DeviceAllocation<typename HostTensorBias::TensorRef> device_ref_Bias1(problem_count);
device_ref_Bias1.copy_from_host(ref_Bias1.data());
cutlass::DeviceAllocation<typename HostTensorC::TensorRef> device_ref_D1(problem_count);
device_ref_D1.copy_from_host(ref_D1.data());
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> device_problem_sizes_0(problem_count);
device_problem_sizes_0.copy_from_host(problem_sizes_0.data());
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> device_problem_sizes_1(problem_count);
device_problem_sizes_1.copy_from_host(problem_sizes_1.data());
B2bGemm b2b_gemm_op;
int threadblock_count = B2bGemm::sufficient(problem_sizes_1.data(), problem_count);
if (!threadblock_count) {
std::cout << "Active CUDA device lacks hardware resources to run CUTLASS Grouped GEMM kernel." << std::endl;
return false;
}
typename B2bGemm::Arguments arguments{
problem_count,
device_problem_sizes_0.get(),
device_problem_sizes_1.get(),
device_ref_A0.get(),
device_ref_B0.get(),
device_ref_C0.get(),
device_ref_Scale0.get(),
device_ref_Bias0.get(),
device_ref_B1.get(),
device_ref_C1.get(),
device_ref_D1.get(),
{alpha0, beta0},
{alpha1, beta1},
threadblock_count
};
cutlass::Status status = b2b_gemm_op.can_implement(arguments);
if(status != cutlass::Status::kSuccess) {
std::cout << "Problem sizes not supported.\n"
<< "Requirments:\n"
<< " problem_size_0.M = problem_size_1.M\n"
<< " problem_size_0.N = problem_size_1.K\n"
<< " ThreadblockShape0::kN = problem_size_0.N\n"
<< " ThreadblockShape1::kN = problem_size_1.N" << std::endl;
}
status = b2b_gemm_op.initialize(arguments);
CUTLASS_CHECK(status);
for(int i = 0; i < warm_ups; i++) {
status = b2b_gemm_op();
CUTLASS_CHECK(status);
}
//
// Run the GEMM
//
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
for(int i = 0; i < runs; i++) {
status = b2b_gemm_op();
CUTLASS_CHECK(status);
}
cudaEventRecord(stop);
cudaDeviceSynchronize();
float gemmTime;
cudaEventElapsedTime(&gemmTime, start, stop);
std::cout << "Fusion time " << gemmTime / (float)runs << " ms\n";
for (int i = 0; i < problem_count; ++i) {
host_tensor_D1.at(i).sync_host();
//
// Verify
//
cutlass::reference::device::Gemm<
typename B2bGemm::ElementA, typename B2bGemm::LayoutA,
typename B2bGemm::ElementB, typename B2bGemm::LayoutB,
ElementAccumulator, typename B2bGemm::LayoutC,
ElementAccumulator, ElementAccumulator>
reference_gemm_0;
cutlass::reference::device::Gemm<
typename B2bGemm::ElementA, typename B2bGemm::LayoutA,
typename B2bGemm::ElementB, typename B2bGemm::LayoutB,
typename B2bGemm::ElementC, typename B2bGemm::LayoutC, ElementCompute,
ElementAccumulator>
reference_gemm_1;
auto problem_size_0 = problem_sizes_0[i];
auto problem_size_1 = problem_sizes_1[i];
reference_gemm_0(
problem_size_0,
ElementAccumulator(1), //intermediate alpha=1
ref_A0.at(i),
ref_B0.at(i),
ElementAccumulator(0), //beta = 0
ref_Z.at(i),
ref_Z.at(i),
ElementAccumulator(0)
);
cutlass::reference::device::TensorScaleBiasGemm<
ElementAccumulator, typename B2bGemm::ElementC, typename B2bGemm::LayoutC,
ElementCompute, typename B2bGemm::LayoutC
> (
problem_size_0,
ref_Z.at(i),
ref_ref_D0.at(i),
alpha0,
ref_Scale0.at(i),
ref_Bias0.at(i)
);
if(relu) {
cutlass::reference::device::TensorReLu(host_tensor_ref_D0.at(i).device_view());
}
reference_gemm_1(
problem_size_1,
alpha1,
ref_ref_D0.at(i),
ref_B1.at(i),
beta1,
{host_tensor_Bias1.at(i).device_data(), typename B2bGemm::LayoutC::Stride(0)},
ref_ref_D1.at(i)
);
if(relu) {
cutlass::reference::device::TensorReLu(host_tensor_ref_D1.at(i).device_view());
}
cudaDeviceSynchronize();
host_tensor_ref_D0.at(i).sync_host();
host_tensor_ref_D1.at(i).sync_host();
CHECK_GT(cutlass::reference::host::TensorNorm(host_tensor_ref_D0.at(i).host_view()), 0);
CHECK_GT(cutlass::reference::host::TensorNorm(host_tensor_D1.at(i).host_view()), 0);
CHECK_GT(cutlass::reference::host::TensorNorm(host_tensor_ref_D1.at(i).host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(
host_tensor_ref_D1.at(i).host_view(),
host_tensor_D1.at(i).host_view());
CHECK_TRUE(passed);
if (!passed)
{
std::stringstream fname;
fname << "error_B2bGemm_device_fused.txt";
std::cerr << "Check failed for GEMM " << i << " in the group." << std::endl;
std::cerr << "Dumping results in " << fname.str() << "\n";
std::ofstream file(fname.str());
file
<< "GEMM " << i << " in group\n"
<< "A0 =\n" << host_tensor_A0.at(i).host_view()
<< "\nB0 =\n" << host_tensor_B0.at(i).host_view()
<< "\nC0 =\n" << host_tensor_C0.at(i).host_view()
<< "\nScale0:\n" << host_tensor_Scale0.at(i).host_view() << "\n"
<< "\nBias0:\n" << host_tensor_Bias0.at(i).host_view() << "\n"
<< "\nB1 =\n" << host_tensor_B1.at(i).host_view()
<< "\nC1 =\n" << host_tensor_C1.at(i).host_view()
<< "\nBias1:\n" << host_tensor_Bias1.at(i).host_view() << "\n"
<< "\n\nReference =\n" << host_tensor_ref_D1.at(i).host_view()
<< "\nComputed =\n" << host_tensor_D1.at(i).host_view();
return false;
}
}
return true;
}
};
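// A usage sketch for the runner above (illustrative only: B2bGemm stands for a concrete grouped
// back-to-back GEMM type defined elsewhere in this example, and the problem sizes below are
// made-up values chosen to satisfy M0 == M1 and N0 == K1):
//
//   std::vector<cutlass::gemm::GemmCoord> problem_sizes_0 = { {128, 64, 576}, {256, 64, 576} };
//   std::vector<cutlass::gemm::GemmCoord> problem_sizes_1 = { {128, 128, 64}, {256, 128, 64} };
//
//   B2bFusedGroupedGemmRun<B2bGemm> runner;
//   bool passed = runner.run(problem_sizes_0, problem_sizes_1,
//                            /*alpha0=*/0.0f,   // alpha0 == 0 selects per-channel scale for GEMM0
//                            /*beta0=*/0.0f,
//                            /*alpha1=*/1.0f,
//                            /*beta1=*/1.0f);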
////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/b2b_grouped_gemm_run.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/b2b_grouped_gemm_run.h",
"repo_id": "examples",
"token_count": 8048
} | 1 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief High-level interface for running a grouped version of a CUTLASS kernel
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/trace.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
#include "cutlass/gemm/kernel/gemm_grouped_problem_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// High-level interface for running a grouped version of a CUTLASS kernel
template <
typename BaseKernel_ ///! Kernel-scoped matrix multiply-accumulate
>
struct GroupedKernel {
public:
using BaseKernel = BaseKernel_;
using Epilogue = typename BaseKernel::Epilogue;
/// Types that need to be exported to work properly with device::BaseGrouped
using ElementA = typename BaseKernel::ElementA;
using LayoutA = typename BaseKernel::LayoutA;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
static ComplexTransform const kTransformA = BaseKernel::kTransformA;
static int const kAlignmentA = BaseKernel::kAlignmentA;
using ElementB = typename BaseKernel::ElementB;
using LayoutB = typename BaseKernel::LayoutB;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
static ComplexTransform const kTransformB = BaseKernel::kTransformB;
static int const kAlignmentB = BaseKernel::kAlignmentB;
using ElementC = typename BaseKernel::ElementC;
using LayoutC = typename BaseKernel::LayoutC;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
static int const kAlignmentC = BaseKernel::kAlignmentC;
using ElementAccumulator = typename BaseKernel::Mma::Policy::Operator::ElementC;
using EpilogueOutputOp = typename BaseKernel::EpilogueOutputOp;
using ThreadblockSwizzle = typename BaseKernel::ThreadblockSwizzle;
using Operator = typename BaseKernel::Operator;
using WarpMmaOperator = typename BaseKernel::Mma::Policy::Operator;
using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator;
using MathOperator = typename WarpMmaOperator::MathOperator;
using OperatorClass = typename WarpMmaOperator::OperatorClass;
using ArchTag = typename WarpMmaOperator::ArchTag;
using ThreadblockShape = typename BaseKernel::Mma::Shape;
using WarpShape = typename BaseKernel::WarpShape;
using InstructionShape = typename BaseKernel::InstructionShape;
static int const kStages = BaseKernel::Mma::kStages;
using Mma = typename BaseKernel::Mma;
using Arguments = typename BaseKernel::GroupedArguments;
using Params = typename BaseKernel::GroupedParams;
using ProblemVisitor = typename ThreadblockSwizzle::ProblemVisitor;
static int const kThreadCount = BaseKernel::kThreadCount;
/// Shared memory storage structure
struct SharedStorage {
typename BaseKernel::SharedStorage kernel;
// ProblemVisitor shared storage can't be overlapped with others
typename ProblemVisitor::SharedStorage problem_visitor;
};
public:
//
// Methods
//
CUTLASS_DEVICE
GroupedKernel() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) {
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return Status::kSuccess;
}
/// Executes a kernel-level GEMM in a loop
CUTLASS_DEVICE
void operator()(Params ¶ms, SharedStorage &shared_storage) {
ThreadblockSwizzle swizzle(params.problem_visitor, shared_storage.problem_visitor, blockIdx.x);
if (ProblemVisitor::kTransposed) {
params.transpose();
}
BaseKernel mma;
// Outer 'persistent' loop to iterate over tiles
while (swizzle.problem_visitor.next_tile()) {
typename BaseKernel::Params mma_params = params.to_single_params(swizzle.problem_visitor);
mma.run_with_swizzle(mma_params, shared_storage.kernel, swizzle);
// Next tile
swizzle.problem_visitor.advance(gridDim.x);
}
}
};
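// Note on intended use (a sketch; the device-layer machinery lives outside this header):
// GroupedKernel is not launched directly. A device-side adapter such as
// cutlass::gemm::device::BaseGrouped<GroupedKernel<BaseKernel>> builds the exported
// GroupedArguments / GroupedParams, sizes a grid of persistent threadblocks, and launches this
// kernel; each threadblock then loops over tiles handed out by the ProblemVisitor until every
// tile of every problem in the group has been covered.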
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/kernel/grouped.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/kernel/grouped.h",
"repo_id": "examples",
"token_count": 1839
} | 2 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
  This example adapts example 16 to use 3xTF32, which brings FP32-level accuracy at roughly
  2x the performance of FP32 CUDA Cores. See example 27 for details of the 3xTF32 technique.
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/host/error_metrics.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = float; // Data type of elements in input tensor
using ElementInputB = float; // Data type of elements in input tensor
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 16>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 3;
// This code section describes which iterator algorithm is selected: Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel; we use the default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized.
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
// 3xTF32 Fprop
using Conv2dFpropKernel_3xTF32 = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
// Only thing needs to be changed from normal Fprop
cutlass::arch::OpMultiplyAddFastF32,
IteratorAlgorithm
>::Kernel;
// 1xTF32 Fprop
using Conv2dFpropKernel_1xTF32 = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemm_3xTF32 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel_3xTF32>;
using ImplicitGemm_1xTF32 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel_1xTF32>;
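// The two kernels above differ only in the math operator tag. OpMultiplyAddFastF32 is the
// "3xTF32" path sketched in example 27: each F32 operand is split into a "big" TF32 value and a
// "small" TF32 residual, and three TF32 Tensor Core MMAs (big*big, big*small, small*big)
// approximate the full F32 product, recovering near-FP32 accuracy. OpMultiplyAdd simply rounds
// each operand to a single TF32 value (1xTF32).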
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool benchmark;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
    // CUTLASS attempts to load 128b vectors of float (TF32) elements. Consequently,
    // all pointers, strides, and tensor extents must be divisible by 4 elements.
//
int const kAlignment = 4;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size) {
this->input_size = input_size;
this->filter_size = filter_size;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.c() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "28_ampere_3xtf32_fast_accurate_tensorop_fprop example\n\n"
<< " This example uses Ampere's Tensor Core operators on F16 data types to compute\n"
<< " forward convolution on tensors of layout NHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n=<int> Input tensor extent N\n"
<< " --h=<int> Input tensor extent H\n"
<< " --w=<int> Input tensor extent W\n"
<< " --c=<int> Input tensor extent C\n"
<< " --k=<int> Filter extent K\n"
<< " --r=<int> Filter extent R\n"
<< " --s=<int> Filter extent S\n\n"
<< " --alpha=<float> Epilogue scalar alpha\n"
<< " --beta=<float> Epilogue scalar beta\n\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag=<string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/28_ampere_3xtf32_fast_accurate_tensorop_fprop/28_ampere_3xtf32_fast_accurate_tensorop_fprop --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n"
<< "$ ./examples/28_ampere_3xtf32_fast_accurate_tensorop_fprop/28_ampere_3xtf32_fast_accurate_tensorop_fprop --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
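  // Worked example with the default Options above (input 1x32x32x32, filter 32x3x3x32,
  // padding 1, stride 1, dilation 1):  P = (32 + 1 + 1 - 3) / 1 + 1 = 32, and likewise Q = 32,
  // so the output tensor extent is 1x32x32x32 (N x P x Q x K).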
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS
int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
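  // For the same default problem: fmas = (1*32*32*32) * (3*3*32) = 9,437,184, i.e. about
  // 0.019 GFLOP per convolution launch (two flops per multiply-add).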
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
double l2_norm_3xtf32_vs_fp64;
double l2_norm_1xtf32_vs_fp64;
double l2_norm_fp32_vs_fp64;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
error(cudaSuccess),
l2_norm_3xtf32_vs_fp64(0),
l2_norm_1xtf32_vs_fp64(0),
l2_norm_fp32_vs_fp64(0) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,Runtime,GFLOPs,3xTF32_vs_FP64,1xTF32_vs_FP64,FP32_vs_FP64";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< runtime_ms << ","
<< gflops << ","
<< l2_norm_3xtf32_vs_fp64 << ","
<< l2_norm_1xtf32_vs_fp64 << ","
<< l2_norm_fp32_vs_fp64;
return out;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
Result profile_convolution(Options const &options) {
Result result;
////////////////////////////////////////////////////////////////////////////////
/// 1. Initialize F32 Precision input tensors using CUTLASS helper functions
////////////////////////////////////////////////////////////////////////////////
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<float, LayoutInputA> tensor_a_F32(options.input_size);
cutlass::HostTensor<float, LayoutInputB> tensor_b_F32(options.filter_size);
cutlass::HostTensor<float, LayoutOutput> tensor_c_F32(options.output_size());
cutlass::HostTensor<float, LayoutOutput> tensor_d_F32(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a_F32.host_view(),
1,
ElementInputA(7),
ElementInputA(-8));
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b_F32.host_view(),
1,
ElementInputB(7),
ElementInputB(-8));
// Fill tensor C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c_F32.host_view(),
1,
ElementInputB(7),
ElementInputB(-8));
// Fill tensor D on host with zeros
cutlass::reference::host::TensorFill(
tensor_d_F32.host_view());
// Copy data from host to GPU
tensor_a_F32.sync_device();
tensor_b_F32.sync_device();
tensor_c_F32.sync_device();
tensor_d_F32.sync_device();
////////////////////////////////////////////////////////////////////////////////
  /// 2. Initialize F64 Precision input tensors using CUTLASS helper functions
////////////////////////////////////////////////////////////////////////////////
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<double, LayoutInputA> tensor_a_F64(options.input_size);
cutlass::HostTensor<double, LayoutInputB> tensor_b_F64(options.filter_size);
cutlass::HostTensor<double, LayoutOutput> tensor_c_F64(options.output_size());
cutlass::HostTensor<double, LayoutOutput> tensor_d_F64(options.output_size());
cutlass::HostTensor<float, LayoutOutput> tensor_d_3xTF32(options.output_size());
cutlass::HostTensor<float, LayoutOutput> tensor_d_1xTF32(options.output_size());
  // Copy values from the F32 tensors into the F64 and TF32-result tensors
cutlass::reference::host::TensorCopy(tensor_a_F64.host_view(), tensor_a_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_b_F64.host_view(), tensor_b_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_c_F64.host_view(), tensor_c_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_F64.host_view(), tensor_d_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_3xTF32.host_view(), tensor_d_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_1xTF32.host_view(), tensor_d_F32.host_view());
// Copy data from host to GPU
tensor_a_F64.sync_device();
tensor_b_F64.sync_device();
tensor_c_F64.sync_device();
tensor_d_F64.sync_device();
tensor_d_3xTF32.sync_device();
tensor_d_1xTF32.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices
);
////////////////////////////////////////////////////////////////////////////////
/// 3. Run 3xTF32 kernel within a profiling loop
////////////////////////////////////////////////////////////////////////////////
// Construct ImplicitGemm::Argument structure with conv2d
// problem size, data pointers, and epilogue values
typename ImplicitGemm_3xTF32::Arguments arguments_3xTF32{
problem_size,
tensor_a_F32.device_ref(),
tensor_b_F32.device_ref(),
tensor_c_F32.device_ref(),
tensor_d_3xTF32.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemm_3xTF32 implicit_gemm_op_3xTF32;
size_t workspace_size_3xTF32 = implicit_gemm_op_3xTF32.get_workspace_size(arguments_3xTF32);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace_3xTF32(workspace_size_3xTF32);
result.status = implicit_gemm_op_3xTF32.can_implement(arguments_3xTF32);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_op_3xTF32.initialize(arguments_3xTF32, workspace_3xTF32.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_op_3xTF32();
CUTLASS_CHECK(result.status);
//
// Performance measurement
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_op_3xTF32();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
tensor_d_3xTF32.sync_host();
////////////////////////////////////////////////////////////////////////////////
/// 4. Run 1xTF32 kernel within a profiling loop
////////////////////////////////////////////////////////////////////////////////
// Construct ImplicitGemm::Argument structure with conv2d
// problem size, data pointers, and epilogue values
typename ImplicitGemm_1xTF32::Arguments arguments_1xTF32{
problem_size,
tensor_a_F32.device_ref(),
tensor_b_F32.device_ref(),
tensor_c_F32.device_ref(),
tensor_d_1xTF32.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemm_1xTF32 implicit_gemm_op_1xTF32;
size_t workspace_size_1xTF32 = implicit_gemm_op_1xTF32.get_workspace_size(arguments_1xTF32);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace_1xTF32(workspace_size_1xTF32);
result.status = implicit_gemm_op_1xTF32.can_implement(arguments_1xTF32);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_op_1xTF32.initialize(arguments_1xTF32, workspace_1xTF32.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_op_1xTF32();
CUTLASS_CHECK(result.status);
tensor_d_1xTF32.sync_host();
////////////////////////////////////////////////////////////////////////////////
// Run reference kernel (F64)
////////////////////////////////////////////////////////////////////////////////
cutlass::reference::device::Conv2d<
double,
LayoutInputA,
double,
LayoutInputB,
double,
LayoutOutput,
double,
double
>(
cutlass::conv::Operator::kFprop,
problem_size,
tensor_a_F64.device_ref(),
tensor_b_F64.device_ref(),
tensor_c_F64.device_ref(),
tensor_d_F64.device_ref(),
options.alpha,
options.beta);
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d_F64.sync_host();
////////////////////////////////////////////////////////////////////////////////
// Run reference kernel (F32)
////////////////////////////////////////////////////////////////////////////////
cutlass::reference::device::Conv2d<
float,
LayoutInputA,
float,
LayoutInputB,
float,
LayoutOutput,
float,
float
>(
cutlass::conv::Operator::kFprop,
problem_size,
tensor_a_F32.device_ref(),
tensor_b_F32.device_ref(),
tensor_c_F32.device_ref(),
tensor_d_F32.device_ref(),
options.alpha,
options.beta);
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d_F32.sync_host();
////////////////////////////////////////////////////////////////////////////////
/////// Compute l2 norms
////////////////////////////////////////////////////////////////////////////////
// l2 norm 3xTF32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_3xTF32_in_F64(options.output_size());
cutlass::reference::host::TensorCopy(tensor_d_3xTF32_in_F64.host_view(), tensor_d_3xTF32.host_view());
result.l2_norm_3xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_3xTF32_in_F64.host_view(), tensor_d_F64.host_view());
// l2 norm 1xTF32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_1xTF32_in_F64(options.output_size());
cutlass::reference::host::TensorCopy(tensor_d_1xTF32_in_F64.host_view(), tensor_d_1xTF32.host_view());
result.l2_norm_1xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_1xTF32_in_F64.host_view(), tensor_d_F64.host_view());
// l2 norm F32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_F32_in_F64(options.output_size());
cutlass::reference::host::TensorCopy(tensor_d_F32_in_F64.host_view(), tensor_d_F32.host_view());
result.l2_norm_fp32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_F32_in_F64.host_view(), tensor_d_F64.host_view());
///////////////////////////////////////////////////////////////////////////////
if (options.save_workspace) {
std::stringstream ss;
ss << "28_ampere_3xtf32_fast_accurate_tensorop_fprop_"
<< options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
<< "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
<< ".dat";
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a_F32.host_view() << "\n\n"
<< "Filters = \n" << tensor_b_F32.host_view() << "\n\n";
output_workspace << "TF32x3 = \n" << tensor_d_3xTF32.host_view() << std::endl;
output_workspace << "TF32x1 = \n" << tensor_d_1xTF32.host_view() << std::endl;
output_workspace << "FP32 = \n" << tensor_d_F32.host_view() << std::endl;
output_workspace << "FP64 = \n" << tensor_d_F64.host_view() << "\n\n";
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major >= 8)) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.benchmark) {
// Benchmark several layers
int batch_sizes[] = {1, 32, 64, 128, 256};
struct Benchmark {
int h, w, c, k, r, s;
} layers[] = {
{56, 56, 64, 256, 1, 1},
{56, 56, 64, 64, 1, 1},
{56, 56, 64, 64, 3, 3},
{56, 56, 256, 64, 1, 1},
{56, 56, 256, 512, 1, 1},
{56, 56, 256, 128, 1, 1},
{28, 28, 128, 128, 3, 3},
{28, 28, 128, 512, 1, 1},
{28, 28, 512, 128, 1, 1},
{28, 28, 512, 1024, 1, 1},
{28, 28, 512, 256, 1, 1},
{14, 14, 256, 256, 3, 3},
{14, 14, 256, 1024, 1, 1},
{14, 14, 1024, 256, 1, 1},
{14, 14, 1024, 2048, 1, 1},
{14, 14, 1024, 512, 1, 1},
{7, 7, 512, 512, 3, 3},
};
Result::print_header(std::cout, options) << std::endl;
int idx = 1;
for (auto const &layer : layers) {
for (auto N : batch_sizes) {
options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c});
Result result = profile_convolution(options);
result.print(std::cout, idx, options) << std::endl;
}
++idx;
}
}
else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/28_ampere_3xtf32_fast_accurate_tensorop_fprop/ampere_3xtf32_fast_accurate_tensorop_fprop.cu/0 | {
"file_path": "examples/28_ampere_3xtf32_fast_accurate_tensorop_fprop/ampere_3xtf32_fast_accurate_tensorop_fprop.cu",
"repo_id": "examples",
"token_count": 10595
} | 3 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
*/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <cmath>
#include <iostream>
#include <vector>
#include <limits>
#include "cutlass/cutlass.h"
#include "cutlass/arch/memory.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_complex.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/epilogue/threadblock/epilogue_visitor_with_softmax.h"
#include "cutlass/epilogue/threadblock/epilogue_with_visitor.h"
#include "cutlass/reduction/kernel/reduce_softmax_final.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "gemm_with_epilogue_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Kernel applies the final softmax normalization:
//
//   Softmax[m, n] = exp(D[m, n] - N[m, 0]) * S[m, 0]
//
// where N holds the row-wise maxima and S the reciprocal row-wise sums of
// exp(D[m, n] - N[m, 0]) produced by the preceding reduction kernel.
//
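// A minimal host-side sketch of the same per-row normalization (illustrative
// only; the function name, float types, and row-major indexing are assumptions
// and are not part of the device kernel below):
//
//   void reference_softmax_row(float const *D, float const *N, float const *S,
//                              float *Soft, int m, int columns, int ldm) {
//     float row_max = N[m];   // row-wise maximum from the GEMM epilogue
//     float inv_sum = S[m];   // reciprocal of sum_n exp(D[m, n] - row_max)
//     for (int n = 0; n < columns; ++n) {
//       Soft[m * ldm + n] = std::exp(D[m * ldm + n] - row_max) * inv_sum;
//     }
//   }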
template <
typename ElementD_,
typename ElementNorm_,
typename ElementSum_,
typename ElementSoft_,
typename ElementSoftmaxCompute_,
int Alignment,
typename ApplyShape_ = MatrixShape<1, 1024>
>
class ApplySoftmax {
public:
using ElementD = ElementD_;
using ElementNorm = ElementNorm_;
using ElementSum = ElementSum_;
using ElementSoft = ElementSoft_;
using ElementSoftmaxCompute = ElementSoftmaxCompute_;
static int const kAlignment = Alignment;
using ApplyShape = ApplyShape_;
using Layout = cutlass::layout::RowMajor;
using TensorRefD = TensorRef<ElementD, Layout>;
using TensorRefN = TensorRef<ElementNorm, Layout>;
using TensorRefSum = TensorRef<ElementSum, Layout>;
using TensorRefSoft = TensorRef<ElementSoft, Layout>;
using FragmentSoftmax = Array<ElementSoftmaxCompute, kAlignment>;
//
// Arguments
//
struct Arguments {
MatrixCoord extent; ///< Extent of D and Softmax matrices
int batch_count; ///< Batch count
TensorRefD ref_D; ///< D matrix computed by GEMM+Max (input)
TensorRefN ref_N; ///< Norm tensor (input)
TensorRefSum ref_S; ///< Sum tensor (input)
TensorRefSoft ref_Soft; ///< Softmax tensor (output)
int64_t batch_stride_D; ///< Batch stride for D tensor
int64_t batch_stride_N; ///< Batch stride for N tensor
int64_t batch_stride_S; ///< Batch stride for S tensor
int64_t batch_stride_Soft; ///< Batch stride for softmax tensor
//
// Methods
//
Arguments():
batch_count(1),
batch_stride_D(0),
batch_stride_N(0),
batch_stride_S(0),
batch_stride_Soft(0)
{ }
Arguments(
MatrixCoord extent_, ///< Extent of D and Softmax matrices
int batch_count_, ///< Batch count
TensorRefD ref_D_, ///< D matrix computed by GEMM+PartialReduce
      TensorRefN ref_N_, ///< Norm tensor (input)
      TensorRefSum ref_S_, ///< Sum tensor (input)
      TensorRefSoft ref_Soft_, ///< Softmax tensor (output)
int64_t batch_stride_D_ = 0,
int64_t batch_stride_N_ = 0,
int64_t batch_stride_S_ = 0,
int64_t batch_stride_Soft_ = 0
):
extent(extent_),
batch_count(batch_count_),
ref_D(ref_D_),
ref_N(ref_N_),
ref_S(ref_S_),
ref_Soft(ref_Soft_),
batch_stride_D(batch_stride_D_),
batch_stride_N(batch_stride_N_),
batch_stride_S(batch_stride_S_),
batch_stride_Soft(batch_stride_Soft_)
{
}
};
//
// Params struct
//
struct Params {
Arguments args;
//
// Methods
//
Params() { }
Params(Arguments const &args_): args(args_) { }
};
//
// SharedStorage
//
struct SharedStorage {
};
private:
public:
CUTLASS_DEVICE
ApplySoftmax() { }
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
apply(params, shared_storage);
}
private:
/// Compute Softmax
CUTLASS_DEVICE
void apply(Params const ¶ms, SharedStorage &shared_storage) {
int block_batch = blockIdx.z;
int block_m = blockIdx.x * ApplyShape::kRow;
int block_n = 0;
int thread_m = threadIdx.y;
int thread_n = threadIdx.x * kAlignment;
int idx_m = block_m + thread_m;
int idx_n = block_n + thread_n;
int batch_offset_norm = block_batch * params.args.batch_stride_N;
int batch_offset_sum = block_batch * params.args.batch_stride_S;
// Kill off thread if it is outside the row boundary
if (params.args.extent.row() <= idx_m) {
return;
}
//
// Setup pointers to load D again
//
using AccessTypeD = AlignedArray<ElementD, kAlignment>;
using AccessTypeSoft = AlignedArray<ElementSoft, kAlignment>;
using FragmentSoft = Array<ElementSoft, kAlignment>;
using ConvertSoftCompute = cutlass::NumericArrayConverter<ElementSoftmaxCompute, ElementD, kAlignment>;
using ConvertSoftOutput = cutlass::NumericArrayConverter<ElementSoft, ElementSoftmaxCompute, kAlignment>;
using Mul = cutlass::multiplies<FragmentSoftmax>;
using Minus = cutlass::minus<FragmentSoftmax>;
using Exp = cutlass::fast_exp_op<FragmentSoftmax>;
ConvertSoftCompute convert_soft_compute;
ConvertSoftOutput convert_soft_output;
Minus minus;
Mul mul;
Exp exponential;
using ConvertSum = cutlass::NumericConverter<ElementSoftmaxCompute, ElementSum>;
using ConvertNorm = cutlass::NumericConverter<ElementSoftmaxCompute, ElementNorm>;
ConvertSum convert_sum;
ConvertNorm convert_norm;
AccessTypeD *access_d = reinterpret_cast<AccessTypeD *>(
params.args.ref_D.data() +
params.args.batch_stride_D * block_batch +
params.args.ref_D.layout()({idx_m, idx_n}));
AccessTypeSoft *access_soft = reinterpret_cast<AccessTypeSoft *>(
params.args.ref_Soft.data() +
params.args.batch_stride_Soft * block_batch +
params.args.ref_Soft.layout()({idx_m, idx_n}));
ElementSum inv_sum = (params.args.ref_S.data())[idx_m + batch_offset_sum];
ElementNorm norm = (params.args.ref_N.data())[idx_m + batch_offset_norm];
//
// Loop
//
CUTLASS_PRAGMA_UNROLL
for (
int idx = 0;
idx < params.args.extent.column();
idx += ApplyShape::kColumn * kAlignment) {
if (idx_n < params.args.extent.column()) {
AccessTypeD fetch;
arch::global_load<AccessTypeD, sizeof(AccessTypeD)>(fetch, access_d, true);
FragmentSoftmax result = mul(exponential(minus(convert_soft_compute(fetch), convert_norm(norm))), convert_sum(inv_sum));
FragmentSoft soft = convert_soft_output(result);
arch::global_store<FragmentSoft, sizeof(FragmentSoft)>(soft, access_soft, true);
}
access_d += ApplyShape::kColumn;
access_soft += ApplyShape::kColumn;
idx_n += ApplyShape::kColumn * kAlignment;
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
/////////////////////////////////////////////////////////////////////////////////////////////////
///
template <
typename ElementA_,
typename LayoutA_,
typename ElementB_,
typename LayoutB_,
typename ElementC_,
typename ElementCompute_,
typename OperatorClass_,
typename ArchTag_,
typename ThreadblockShape_,
typename WarpShape_,
typename InstructionShape_,
typename EpilogueFunctorOp_,
int kStages_,
typename ApplyShape_ = MatrixShape<1, 1024>,
int AlignmentA_ = 128 / cutlass::sizeof_bits<ElementA_>::value,
int AlignmentB_ = 128 / cutlass::sizeof_bits<ElementB_>::value,
int AlignmentSoftmax_ = 128 / cutlass::sizeof_bits<ElementC_>::value,
typename ElementNorm_ = float,
typename ElementSum_ = float,
typename ElementSoftmax_ = ElementC_
>
class GemmSoftmax {
public:
///////////////////////////////////////////////////////////////////////////////////////////////
//
// Type definitions
//
using ElementA = ElementA_;
using ElementB = ElementB_;
using ElementC = ElementC_;
using ElementCompute = ElementCompute_;
using ElementSum = ElementSum_;
using ElementSoft = ElementSoftmax_;
using ElementSoftmaxCompute = float;
using LayoutA = LayoutA_;
using LayoutB = LayoutB_;
using EpilogueFunctorOp = EpilogueFunctorOp_;
using ElementNorm = ElementNorm_;
using ApplyShape = ApplyShape_;
// These are mandatory layouts.
using LayoutC = cutlass::layout::RowMajor;
using LayoutN = cutlass::layout::RowMajor;
using LayoutS = cutlass::layout::RowMajor;
using LayoutSoft = cutlass::layout::RowMajor;
using TensorRefA = TensorRef<ElementA, LayoutA>;
using TensorRefB = TensorRef<ElementB, LayoutB>;
using TensorRefC = TensorRef<ElementC, LayoutC>;
using TensorRefN = TensorRef<ElementNorm, LayoutN>;
using TensorRefSum = TensorRef<ElementSum, LayoutS>;
using TensorRefSoft = TensorRef<ElementSoft, LayoutSoft>;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
static int const kStages = kStages_;
static int const AlignmentA = AlignmentA_;
static int const AlignmentB = AlignmentB_;
static int const AlignmentSoftmax = AlignmentSoftmax_;
using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle;
///////////////////////////////////////////////////////////////////////////////////////////////
// basic GEMM kernel
using DefaultGemmKernel = typename cutlass::gemm::kernel::DefaultGemm<
ElementA,
LayoutA,
AlignmentA,
ElementB,
LayoutB,
AlignmentB,
ElementC,
LayoutC,
ElementCompute,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueFunctorOp,
ThreadblockSwizzle,
kStages,
true,
typename cutlass::gemm::device::DefaultGemmConfiguration<
OperatorClass, ArchTag, ElementA, ElementB, ElementC, ElementCompute>::Operator,
cutlass::gemm::SharedMemoryClearOption::kNone
>::GemmKernel;
///////////////////////////////////////////////////////////////////////////////////////////////
// Epilogue visitor
using EpilogueVisitor = typename cutlass::epilogue::threadblock::EpilogueVisitorSoftmax<
ThreadblockShape,
DefaultGemmKernel::kThreadCount,
typename DefaultGemmKernel::Epilogue::OutputTileIterator,
ElementCompute,
ElementNorm,
ElementSum,
ElementSoftmaxCompute,
EpilogueFunctorOp
>;
/// Epilogue
using Epilogue = typename cutlass::epilogue::threadblock::EpilogueWithVisitorFromExistingEpilogue<
EpilogueVisitor,
typename DefaultGemmKernel::Epilogue
>::Epilogue;
// GEMM
using GemmKernel = gemm::kernel::GemmWithEpilogueVisitor<
typename DefaultGemmKernel::Mma,
Epilogue,
ThreadblockSwizzle
>;
// Softmax kernel
using SoftmaxApplyKernel = kernel::ApplySoftmax<
ElementC,
ElementNorm,
ElementSum,
ElementSoft,
ElementSoftmaxCompute,
AlignmentSoftmax,
ApplyShape
>;
using ApplyFinalReductionKernel = cutlass::reduction::kernel::ApplySoftmaxFinalReduction<
ElementNorm,
ElementSum,
ElementSoftmaxCompute,
ThreadblockShape
>;
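  // The full GemmSoftmax pipeline therefore launches three kernels in sequence
  // (see run() below): (1) GemmKernel computes D = alpha * A * B + beta * C and,
  // through the epilogue visitor, per-tile row maxima and partial sums;
  // (2) ApplyFinalReductionKernel reduces those partials into per-row norm and
  // sum tensors; (3) SoftmaxApplyKernel reads D, norm, and sum to produce the
  // softmax output.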
public:
/// Arguments class
struct Arguments {
typename GemmKernel::Arguments gemm;
typename SoftmaxApplyKernel::Arguments softmax;
typename ApplyFinalReductionKernel::Arguments reduction;
cutlass::gemm::GemmCoord extend;
//
// Methods
//
Arguments() { }
Arguments(
cutlass::gemm::GemmCoord problem_size,
int32_t batch_count_,
TensorRefA ref_A_,
TensorRefB ref_B_,
TensorRefC ref_C_,
TensorRefC ref_D_,
typename EpilogueFunctorOp::Params linear_scaling,
TensorRefN ref_N_,
TensorRefSum ref_S_,
TensorRefSoft ref_Softmax_,
int64_t batch_stride_A_ = 0,
int64_t batch_stride_B_ = 0,
int64_t batch_stride_C_ = 0,
int64_t batch_stride_D_ = 0,
int64_t batch_stride_Max_ = 0,
int64_t batch_stride_Sum_ = 0,
int64_t batch_stride_Softmax_ = 0
):
gemm(
cutlass::gemm::GemmUniversalMode::kBatched,
problem_size,
batch_count_,
ref_A_,
ref_B_,
ref_C_,
ref_D_,
ref_N_.data(),
ref_S_.data(),
batch_stride_A_,
batch_stride_B_,
typename EpilogueVisitor::Arguments(
linear_scaling,
batch_stride_C_,
batch_stride_D_,
batch_stride_Max_,
batch_stride_Sum_
)
),
reduction(
problem_size,
ref_N_.data(),
ref_S_.data(),
batch_stride_Max_,
batch_stride_Sum_
),
softmax(
MatrixCoord(problem_size.m(), problem_size.n()),
batch_count_,
ref_D_,
ref_N_,
ref_S_,
ref_Softmax_,
batch_stride_D_,
batch_stride_Max_,
batch_stride_Sum_,
batch_stride_Softmax_
),
extend(problem_size)
{
}
};
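  //
  // Illustrative usage sketch (a hedged example, not part of this header: it
  // assumes a concrete instantiation named GemmSoftmax_, TensorRefs built from
  // raw pointers plus leading dimensions, and a LinearCombination-style
  // epilogue whose Params accepts {alpha, beta}):
  //
  //   typename GemmSoftmax_::Arguments args(
  //     {M, N, K}, batch_count,
  //     {ptr_A, lda}, {ptr_B, ldb}, {ptr_C, ldc}, {ptr_D, ldd},
  //     {alpha, beta},
  //     {ptr_Max, ldn}, {ptr_Sum, lds}, {ptr_Softmax, ldsoft});
  //
  //   GemmSoftmax_ op;
  //   if (op.initialize(args) == cutlass::Status::kSuccess) {
  //     op.run(stream);
  //   }
  //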
struct Params {
typename GemmKernel::Params gemm;
typename SoftmaxApplyKernel::Params softmax;
typename ApplyFinalReductionKernel::Params reduction;
MatrixCoord extend;
//
// Methods
//
Params() { }
Params(Arguments const &args):
gemm(args.gemm),
reduction(args.reduction),
softmax(args.softmax),
extend(MatrixCoord(args.extend.m(), args.extend.n()))
{
}
};
public:
// Gemm
//
// Methods
//
private:
Params params_;
public:
/// Ctor
GemmSoftmax() {
}
/// Initialize
Status initialize(Arguments const &args) {
params_ = Params(args);
return cutlass::Status::kSuccess;
}
/// Run
Status run(cudaStream_t stream) {
//
// Launch the GEMM + max kernel
//
dim3 gemm_grid = ThreadblockSwizzle().get_grid_shape(params_.gemm.grid_tiled_shape);
dim3 gemm_block(GemmKernel::kThreadCount, 1, 1);
int gemm_smem_size = int(sizeof(typename GemmKernel::SharedStorage));
cudaError_t result;
if (gemm_smem_size >= (48 << 10)) {
result = cudaFuncSetAttribute(cutlass::Kernel<GemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
gemm_smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
cutlass::Kernel<GemmKernel><<<gemm_grid, gemm_block, gemm_smem_size, stream>>>(params_.gemm);
result = cudaGetLastError();
if (result != cudaSuccess) {
return cutlass::Status::kErrorInternal;
}
//
// Launch the ApplyFinalReductionKernel
//
int thread_per_block = 128;
int block_per_row = (params_.extend.row() + thread_per_block - 1) / thread_per_block;
if (block_per_row < 4) {
thread_per_block = 32;
block_per_row = (params_.extend.row() + thread_per_block - 1) / thread_per_block;
}
dim3 final_reduction_grid(block_per_row, 1, params_.softmax.args.batch_count);
dim3 final_reduction_block(thread_per_block);
Kernel<ApplyFinalReductionKernel><<<
final_reduction_grid, final_reduction_block, sizeof(typename ApplyFinalReductionKernel::SharedStorage), stream
>>>(params_.reduction);
result = cudaGetLastError();
if (result != cudaSuccess) {
return cutlass::Status::kErrorInternal;
}
//
// Launch the SoftmaxApplyKernel
//
dim3 apply_block(SoftmaxApplyKernel::ApplyShape::kColumn, SoftmaxApplyKernel::ApplyShape::kRow);
int threadblock_rows = SoftmaxApplyKernel::ApplyShape::kRow;
int threadblock_columns = SoftmaxApplyKernel::ApplyShape::kColumn * SoftmaxApplyKernel::kAlignment;
dim3 apply_grid(
(params_.softmax.args.extent.row() + threadblock_rows - 1) / threadblock_rows,
(params_.softmax.args.extent.column() + threadblock_columns - 1) / threadblock_columns,
params_.softmax.args.batch_count);
Kernel<SoftmaxApplyKernel><<<
apply_grid, apply_block, sizeof(typename SoftmaxApplyKernel::SharedStorage), stream
>>>(params_.softmax);
result = cudaGetLastError();
if (result != cudaSuccess) {
return cutlass::Status::kErrorInternal;
}
return cutlass::Status::kSuccess;
}
/// Function call operator
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/35_gemm_softmax/gemm_with_softmax.h/0 | {
"file_path": "examples/35_gemm_softmax/gemm_with_softmax.h",
"repo_id": "examples",
"token_count": 7230
} | 4 |
################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
import sys
print("This example is deprecated. Please see examples/python for examples of using "
"the CUTLASS Python interface.")
sys.exit(0)
import numpy as np
import cutlass.backend as pycutlass
from cutlass.backend import *
from cutlass.backend.utils.device import device_cc
from cutlass.backend.conv2d_operation import *
from cutlass.backend.utils.reference_model import Conv2dReferenceModule
import torch.nn.functional as F
import argparse
# parse the arguments
parser = argparse.ArgumentParser(description="Launch CUTLASS convolution 2d kernels from Python")
# Operation description
# math instruction description
parser.add_argument("-i", "--instruction_shape",
default=[1, 1, 1], nargs=3, type=int,
help="This option describes the size of MMA op")
parser.add_argument("-ta", "--element_a", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor A')
parser.add_argument("-tb", "--element_b", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor B')
parser.add_argument("-tc", "--element_c", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor C and output tensor D')
parser.add_argument("-tacc", "--element_acc", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of accumulator')
parser.add_argument('-m', "--math", default="multiply_add",
type=str, choices=["multiply_add", "multiply_add_fast_bf16", "multiply_add_fast_f32"], help="math instruction")
parser.add_argument('-op', "--opcode", default="Simt", type=str,
choices=["Simt", 'TensorOp'],
help='This option describes whether you want to use tensor \
cores (TensorOp) or regular SIMT cores (Simt) on GPU SM')
# tile description
parser.add_argument("-b", "--threadblock_shape",
default=[128, 128, 8], nargs=3, type=int,
help="This option describes the tile size a thread block with compute")
parser.add_argument("-s", "--stages", default=4,
type=int, help="Number of pipelines you want to use")
parser.add_argument("-w", "--warp_count", default=[
4, 2, 1], nargs=3, type=int,
help="This option describes the number of warps along M, N, and K of the threadblock")
parser.add_argument("-cc", "--compute_capability", default=80,
type=int, help="This option describes CUDA SM architecture number")
# A
parser.add_argument('-la', "--layout_a", default="TensorNHWC", type=str, choices=[
"TensorNHWC", "TensorNC32HW32"],
help="Memory layout of input tensor A")
parser.add_argument('-aa', '--alignment_a', default=1,
                    type=int, help="Memory alignment of input tensor A")
# B
parser.add_argument('-lb', "--layout_b", default="TensorNHWC", type=str, choices=[
"TensorNHWC", "TensorC32RSK32"],
help="Memory layout of input tensor B")
parser.add_argument('-ab', '--alignment_b', default=1,
type=int, help="Memory alignment of input tensor B")
# C
parser.add_argument('-lc', "--layout_c", default="TensorNHWC", type=str, choices=[
"TensorNHWC", "TensorNC32HW32"],
help="Memory layout of input tensor C and output tensor D")
parser.add_argument('-ac', '--alignment_c', default=1,
type=int, help="Memory alignment of input tensor C and output tensor D")
# epilogue
parser.add_argument("-te", "--element_epilogue", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16'],
help='Data type of computation in the epilogue')
parser.add_argument("-ep", "--epilogue_functor", default="LinearCombination",
type=str, choices=['LinearCombination', 'FastLinearCombinationClamp', 'LinearCombinationClamp'],
help="This option describes the epilogue part of the kernel")
# swizzling
parser.add_argument("-sw", "--swizzling_functor", default="IdentitySwizzle1", type=str, choices=[
"IdentitySwizzle1", "IdentitySwizzle2", "IdentitySwizzle4", "IdentitySwizzle8",
"HorizontalSwizzle", "StridedDgradIdentitySwizzle1", "StridedDgradIdentitySwizzle4",
"StridedDgradHorizontalSwizzle"],
help="This option describes how thread blocks are scheduled on GPU")
# conv related
parser.add_argument("-co", "--conv_kind", default="fprop", type=str, choices=['fprop', 'dgrad', 'wgrad'],
help="The type of convolution: forward propagation (fprop), \
gradient of activation (dgrad), gradient of weight (wgrad)")
parser.add_argument("-st", "--stride_support", default="Strided", type=str, choices=["Strided", "Unity"],
)
parser.add_argument("-ia", "--iterator_algorithm", default="analytic", type=str,
choices=["analytic", "optimized", "fixed_channels", "few_channels"],
help="This option describes iterator algorithm")
# arguments
parser.add_argument("-sm", "--split_k_mode", default="Serial", type=str, choices=["Serial", "Parallel"],
help="Split K Mode. Serial is used for non-splitK or serial-splitK.\
Parallel is used for parallel splitK.")
parser.add_argument('-k', '--split_k_slices', default=1,
type=int, help="Number of split-k partitions. (default 1)")
parser.add_argument("-nhwc", "--nhwc", nargs=4, type=int, help="input size (NHWC)")
parser.add_argument("-krsc", "--krsc", nargs=4, type=int, help="filter size (KRSC)")
parser.add_argument("-pad", "--pad", nargs=4, type=int, help="padding (pad_h, _, pad_w, _)")
parser.add_argument("-stride", "--stride", nargs=2, type=int, help="stride (stride_h, stride_w)")
parser.add_argument("-dilation", "--dilation", nargs=2, type=int, help="dilation (dilation_h, dilation_w)")
parser.add_argument("-alpha", "--alpha", default=1.0, type=float, help="alpha")
parser.add_argument("-beta", "--beta", default=0.0, type=float, help="beta")
parser.add_argument('-bias', '--bias', action='store_true', help="C is bias vector")
# Activation function
parser.add_argument("-activ", "--activation_function", default="identity",
choices=["identity", "relu", "leaky_relu", "tanh", "sigmoid", "silu", "hardswish", "gelu"], help="activation function")
parser.add_argument("-activ_arg", "--activation_args", default=[], nargs="+", type=float,
help="addition arguments for activation")
parser.add_argument('--print_cuda', action="store_true",
help="print the underlying CUDA kernel")
try:
args = parser.parse_args()
except:
sys.exit(0)
cc = device_cc()
if args.compute_capability != cc:
raise Exception(("Parameter --compute-capability of {} "
"does not match that of the device of {}.").format(args.compute_capability, cc))
pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32)
np.random.seed(0)
element_a = getattr(cutlass_bindings, args.element_a)
element_b = getattr(cutlass_bindings, args.element_b)
element_c = getattr(cutlass_bindings, args.element_c)
element_acc = getattr(cutlass_bindings, args.element_acc)
math_operation = getattr(MathOperation, args.math)
opclass = getattr(cutlass_bindings.OpClass, args.opcode)
math_inst = MathInstruction(
args.instruction_shape, element_a, element_b,
element_acc, opclass, math_operation
)
tile_description = TileDescription(
args.threadblock_shape, args.stages, args.warp_count,
math_inst
)
layout_a = getattr(cutlass_bindings, args.layout_a)
layout_b = getattr(cutlass_bindings, args.layout_b)
layout_c = getattr(cutlass_bindings, args.layout_c)
A = TensorDescription(
element_a, layout_a, args.alignment_a
)
B = TensorDescription(
element_b, layout_b, args.alignment_b
)
C = TensorDescription(
element_c, layout_c, args.alignment_c
)
element_epilogue = getattr(cutlass_bindings, args.element_epilogue)
if (args.activation_function == "identity"
or (args.split_k_mode == "Parallel" and args.split_k_slices > 1)):
#
epilogue_functor = getattr(pycutlass, args.epilogue_functor)(
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
else:
epilogue_functor = getattr(pycutlass, "LinearCombinationGeneric")(
getattr(pycutlass, args.activation_function)(element_epilogue),
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
iterator_algorithm = getattr(cutlass_bindings.conv.IteratorAlgorithm, args.iterator_algorithm)
swizzling_functor = getattr(cutlass_bindings, args.swizzling_functor)
stride_support = getattr(StrideSupport, args.stride_support)
conv_kind = getattr(cutlass_bindings.conv.Operator, args.conv_kind)
operation = Conv2dOperation(
conv_kind=conv_kind, iterator_algorithm=iterator_algorithm,
arch=args.compute_capability, tile_description=tile_description,
A=A, B=B, C=C, stride_support=stride_support,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
if args.print_cuda:
print(operation.rt_module.emit())
operations = [operation,]
if args.split_k_mode == "Parallel" and args.split_k_slices > 1:
if (args.activation_function == "identity"):
epilogue_functor_reduction = getattr(pycutlass, args.epilogue_functor)(
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
else:
epilogue_functor_reduction = getattr(pycutlass, "LinearCombinationGeneric")(
getattr(pycutlass, args.activation_function)(element_epilogue),
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
reduction_operation = ReductionOperation(
shape=cutlass_bindings.MatrixCoord(4, 32 * C.alignment),
C=C, element_accumulator=element_acc,
element_compute=element_epilogue,
epilogue_functor=epilogue_functor_reduction,
count=C.alignment
)
operations.append(reduction_operation)
pycutlass.compiler.add_module(operations)
problem_size = cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(args.nhwc[0], args.nhwc[1], args.nhwc[2], args.nhwc[3]),
cutlass_bindings.Tensor4DCoord(args.krsc[0], args.krsc[1], args.krsc[2], args.krsc[3]),
cutlass_bindings.Tensor4DCoord(args.pad[0], args.pad[1], args.pad[2], args.pad[3]),
cutlass_bindings.MatrixCoord(args.stride[0], args.stride[1]),
cutlass_bindings.MatrixCoord(args.dilation[0], args.dilation[1]),
cutlass_bindings.conv.Mode.cross_correlation,
args.split_k_slices, 1
)
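# For reference, the output spatial extent implied by the problem size above follows
# the usual cross-correlation formula (illustrative only; symmetric padding assumed,
# with names mirroring the command-line arguments):
#   P = (H + 2 * pad_h - dilation_h * (R - 1) - 1) // stride_h + 1
#   Q = (W + 2 * pad_w - dilation_w * (S - 1) - 1) // stride_w + 1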
# User-provided inputs
tensor_A_size = cutlass_bindings.conv.implicit_gemm_tensor_a_size(
conv_kind, problem_size
)
tensor_B_size = cutlass_bindings.conv.implicit_gemm_tensor_b_size(
conv_kind, problem_size
)
if args.bias:
tensor_C_size = cutlass_bindings.conv.implicit_gemm_tensor_c_extent(
conv_kind, problem_size
).at(3)
else:
tensor_C_size = cutlass_bindings.conv.implicit_gemm_tensor_c_size(
conv_kind, problem_size
)
tensor_D_size = cutlass_bindings.conv.implicit_gemm_tensor_c_size(
conv_kind, problem_size
)
if args.element_a != "int8":
tensor_A = torch.ceil(torch.empty(size=(tensor_A_size,), dtype=getattr(torch, args.element_a), device="cuda").uniform_(-8.5, 7.5))
else:
tensor_A = torch.empty(size=(tensor_A_size,), dtype=getattr(torch, args.element_a), device="cuda").uniform_(-2, 2)
if args.element_b != "int8":
tensor_B = torch.ceil(torch.empty(size=(tensor_B_size,), dtype=getattr(torch, args.element_b), device="cuda").uniform_(-8.5, 7.5))
else:
tensor_B = torch.empty(size=(tensor_B_size,), dtype=getattr(torch, args.element_b), device="cuda").uniform_(-2, 2)
if args.element_c != "int8":
tensor_C = torch.ceil(torch.empty(size=(tensor_C_size,), dtype=getattr(torch, args.element_c), device="cuda").uniform_(-8.5, 7.5))
else:
tensor_C = torch.empty(size=(tensor_C_size,), dtype=getattr(torch, args.element_c), device="cuda").uniform_(-2, 2)
tensor_D = torch.ones(size=(tensor_D_size,), dtype=getattr(torch, args.element_c), device="cuda")
arguments = Conv2dArguments(
operation=operation, problem_size=problem_size, A=tensor_A,
B=tensor_B, C=tensor_C, D=tensor_D,
output_op = operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args)),
split_k_mode=getattr(cutlass_bindings.conv.SplitKMode, args.split_k_mode),
split_k_slices=problem_size.split_k_slices
)
if args.split_k_mode == "Parallel" and args.split_k_slices > 1:
implicit_gemm_size = cutlass_bindings.conv.implicit_gemm_problem_size(conv_kind, arguments.problem_size)
reduction_arguments = ReductionArguments(
reduction_operation,
problem_size=[implicit_gemm_size.m(), implicit_gemm_size.n()],
partitions=problem_size.split_k_slices,
workspace=arguments.ptr_D,
destination=tensor_D,
source=tensor_C,
output_op = reduction_operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args)),
bias = arguments.bias
)
operation.run(arguments)
if args.split_k_mode == "Parallel" and args.split_k_slices > 1:
reduction_operation.run(reduction_arguments)
reduction_arguments.sync()
else:
arguments.sync()
reference_model = Conv2dReferenceModule(A, B, C, conv_kind)
tensor_D_ref = reference_model.run(tensor_A, tensor_B, tensor_C, arguments.problem_size, args.alpha, args.beta, args.bias)
if (args.activation_function != "identity"):
tensor_D_ref = getattr(F, args.activation_function)(*([tensor_D_ref,] + args.activation_args))
try:
    assert torch.equal(tensor_D, tensor_D_ref)
except AssertionError:
    assert torch.allclose(tensor_D, tensor_D_ref, rtol=1e-2)
print("Passed.")
| examples/40_cutlass_py/customizable/conv2d.py/0 | {
"file_path": "examples/40_cutlass_py/customizable/conv2d.py",
"repo_id": "examples",
"token_count": 6379
} | 5 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS Attention Example.
    This workload computes fused multi-head attention. Because it keeps the
    attention matrix in shared memory, it both runs faster and uses less global
    memory than an unfused implementation.
This is based on `"Self-Attention Does Not Need O(n^2) Memory" <http://arxiv.org/abs/2112.05682>`_,
and very similar to `"FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness" <https://arxiv.org/abs/2205.14135>`_.
Algorithm:
    In short, we can compute the output incrementally in blocks of size B:
    we just need to divide the final result by the sum of all coefficients in
    the softmax (which we compute incrementally), as in the following pseudo-code:
    ```
    s_prime = torch.zeros([num_queries])
    O = torch.zeros([num_queries, head_size_v])
    for i in range(0, K.shape[0], B):
        si = exp((Q . K[i * B:(i+1) * B].t) * scale)
        s_prime += si.sum(-1)
        O += si . V[i * B:(i+1) * B]
    O = O / s_prime
    ```
In practice, and for numerical stability reasons,
    we also subtract the maximum so far (`mi`) before doing
the exponential. When we encounter new keys, the maximum
used to compute O so far (`m_prime`) can differ from the
current maximum, so we update O before accumulating with
```
O = O * exp(m_prime - mi)
m_prime = mi
```
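    Putting the two pieces together, one numerically stable pass over all the keys
    can be sketched as follows (illustrative pseudo-code only, not the exact kernel
    code; Q is [num_queries, head_size], K/V are [num_keys, head_size(_v)]):
    ```
    m_prime = torch.full([num_queries], -float("inf"))
    s_prime = torch.zeros([num_queries])
    O = torch.zeros([num_queries, head_size_v])
    for i in range(0, K.shape[0], B):
        si = (Q @ K[i * B:(i+1) * B].t()) * scale
        mi = torch.maximum(m_prime, si.max(-1).values)
        pi = torch.exp(si - mi[:, None])
        correction = torch.exp(m_prime - mi)
        s_prime = s_prime * correction + pi.sum(-1)
        O = O * correction[:, None] + pi @ V[i * B:(i+1) * B]
        m_prime = mi
    O = O / s_prime[:, None]
    ```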
Implementation details:
- `si` is stored in shared memory between the 2 back to back gemms
- we keep and accumulate the output
directly in registers if we can (`head_size_v <= 128`).
Otherwise, we store it & accumulate in global memory (slower)
- blocks are parallelized across the batch dimension, the number
of heads, and the query sequence size
Examples:
# Run an attention example with default setup
$ ./examples/41_fused_multi_head_attention/41_fused_multi_head_attention_fixed_seqlen
# Run an attention example with custom setup
$ ./examples/41_fused_multi_head_attention/41_fused_multi_head_attention_fixed_seqlen --head_number=2 --batch_size=3 --head_size=32 --head_size_v=64 --seq_length=512 --seq_length_kv=1024 --causal=true
Acknowledgement: Fixed-sequence-length FMHA code was upstreamed by Meta xFormers (https://github.com/facebookresearch/xformers).
*/
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm_grouped.h"
#include "cutlass/gemm/kernel/default_gemm_grouped.h"
#include "cutlass/gemm/device/gemm_grouped.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/device/gemm_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/kernel/gemm_grouped.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_complex.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/threadblock/epilogue_with_visitor.h"
#include "cutlass/fast_math.h"
#include "kernel_forward.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
bool error;
bool reference_check;
bool use_mask;
bool causal;
std::vector<cutlass::gemm::GemmCoord> problem_sizes0;
std::vector<cutlass::gemm::GemmCoord> problem_sizes1;
std::vector<cutlass::gemm::GemmCoord> problem_sizes0_real;
std::vector<cutlass::gemm::GemmCoord> problem_sizes1_real;
int alignment;
int head_number;
int batch_size;
int head_size;
int head_size_v;
int seq_length;
int seq_length_kv;
int iterations;
// alpha0, alpha1 and beta are fixed
// in this multi-head attention example
float alpha0;
float alpha1;
float beta;
//
// Methods
//
Options():
help(false),
error(false),
alignment(1),
reference_check(true),
head_number(12),
batch_size(16),
head_size(64),
head_size_v(64),
seq_length(1024),
seq_length_kv(1024),
use_mask(false),
iterations(20),
causal(false)
{ }
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
return;
}
cmd.get_cmd_line_argument("alignment", alignment, 1);
cmd.get_cmd_line_argument("head_number", head_number, 12);
cmd.get_cmd_line_argument("batch_size", batch_size, 16);
cmd.get_cmd_line_argument("head_size", head_size, 64);
cmd.get_cmd_line_argument("head_size_v", head_size_v, head_size);
cmd.get_cmd_line_argument("seq_length", seq_length, 1024);
cmd.get_cmd_line_argument("seq_length_kv", seq_length_kv, seq_length);
cmd.get_cmd_line_argument("use_mask", use_mask, false);
cmd.get_cmd_line_argument("iterations", iterations, 20);
cmd.get_cmd_line_argument("reference-check", reference_check, true);
cmd.get_cmd_line_argument("causal", causal, true);
randomize_problems();
}
void randomize_problems() {
int problem_count = head_number * batch_size;
problem_sizes0.reserve(problem_count);
problem_sizes1.reserve(problem_count);
// When using mask, the original inputs are not padded
// and we need to save these info.
if (use_mask) {
problem_sizes0_real.reserve(problem_count);
problem_sizes1_real.reserve(problem_count);
}
for (int i = 0; i < batch_size; ++i) {
// problems belonging to the same batch share the same seq len
int m_real = seq_length;
int mkv_real = seq_length_kv;
int m = (m_real + alignment - 1) / alignment * alignment;
int mkv = (mkv_real + alignment - 1) / alignment * alignment;
int k0 = head_size;
int k1 = head_size_v;
for (int j = 0; j < head_number; ++j) {
cutlass::gemm::GemmCoord problem0(m, mkv, k0);
cutlass::gemm::GemmCoord problem1(m, k1, mkv);
problem_sizes0.push_back(problem0);
problem_sizes1.push_back(problem1);
if (use_mask) {
cutlass::gemm::GemmCoord problem0_real(m_real, mkv_real, k0);
cutlass::gemm::GemmCoord problem1_real(m_real, k1, mkv_real);
problem_sizes0_real.push_back(problem0_real);
problem_sizes1_real.push_back(problem1_real);
}
}
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "41_fused_multi_head_attention_fixed_seqlen\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --head_number=<int> Head number in multi-head attention (default: --head_number=12)\n"
<< " --batch_size=<int> Batch size in multi-head attention (default: --batch_size=16)\n"
<< " --head_size=<int> Head size in multi-head attention (default: --head_size=64)\n"
<< " --head_size_v=<int> Head size in multi-head attention for V (default: --head_size_v=head_size)\n"
<< " --seq_length=<int> Sequence length in multi-head attention for Q (default: --seq_length=1024)\n"
<< " --seq_length_kv=<int> Sequence length in multi-head attention for K/V (default: --seq_length_kv=seq_length)\n"
<< " --use_mask=<bool> If true, performs padding-like masking in softmax.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --reference-check=<bool> If true, performs reference check.\n"
<< " --causal=<bool> If true, uses causal masking.\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fops = int64_t();
for (size_t i = 0; i < problem_sizes0.size(); ++i) {
auto const& problem0 = problem_sizes0[i];
auto const& problem1 = problem_sizes1[i];
for (int row = 0; row < problem0.m(); ++row) {
int num_cols0 = problem0.n();
if (causal) {
num_cols0 = std::min(row + 1, num_cols0);
}
// P <- Q . K_t
fops += 2 * num_cols0 * problem0.k();
// P <- exp(P - max(P))
fops += 2 * num_cols0;
// S <- sum(P)
fops += num_cols0 - 1;
// O <- P . V
fops += 2 * num_cols0 * problem1.n();
// O <- O / S
fops += num_cols0 * problem1.n();
}
}
return double(fops) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Attention>
class TestbedAttention {
public:
//
// Type definitions
//
using ElementQ = typename Attention::scalar_t;
using ElementK = typename Attention::scalar_t;
using ElementP = typename Attention::accum_t;
using ElementAccumulator = typename Attention::accum_t;
using ElementV = typename Attention::scalar_t;
using ElementO = typename Attention::output_t;
using ElementCompute = typename Attention::accum_t;
using ElementNorm = typename Attention::accum_t;
using ElementSum = typename Attention::accum_t;
using ElementSoftmaxCompute = typename Attention::accum_t;
using LayoutQ = cutlass::layout::RowMajor;
using LayoutK = cutlass::layout::ColumnMajor;
using LayoutP = cutlass::layout::RowMajor;
using LayoutV = cutlass::layout::RowMajor;
using LayoutO = cutlass::layout::RowMajor;
using MatrixCoord = typename LayoutP::TensorCoord;
private:
//
// Data members
//
Options & options;
/// Initialization
cutlass::Distribution::Kind init_Q;
cutlass::Distribution::Kind init_K;
cutlass::Distribution::Kind init_P;
cutlass::Distribution::Kind init_V;
cutlass::Distribution::Kind init_O;
uint32_t seed;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device0;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device1;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device0_real;
std::vector<int64_t> offset_Q;
std::vector<int64_t> offset_K;
std::vector<int64_t> offset_P;
std::vector<int64_t> offset_V;
std::vector<int64_t> offset_O;
std::vector<int64_t> ldq_host;
std::vector<int64_t> ldk_host;
std::vector<int64_t> ldp_host;
std::vector<int64_t> ldv_host;
std::vector<int64_t> ldo_host;
std::vector<int64_t> seqlen_host;
cutlass::DeviceAllocation<int64_t> ldq;
cutlass::DeviceAllocation<int64_t> ldk;
cutlass::DeviceAllocation<int64_t> ldp;
cutlass::DeviceAllocation<int64_t> ldv;
cutlass::DeviceAllocation<int64_t> ldo;
cutlass::DeviceAllocation<int64_t> seqlen;
cutlass::DeviceAllocation<ElementQ> block_Q;
cutlass::DeviceAllocation<ElementK> block_K;
cutlass::DeviceAllocation<ElementP> block_P;
cutlass::DeviceAllocation<ElementV> block_V;
cutlass::DeviceAllocation<ElementO> block_O;
cutlass::DeviceAllocation<ElementNorm> block_Norm;
cutlass::DeviceAllocation<ElementSum> block_Sum;
cutlass::DeviceAllocation<int64_t> offset_P_Device;
cutlass::DeviceAllocation<ElementQ *> ptr_Q;
cutlass::DeviceAllocation<ElementK *> ptr_K;
cutlass::DeviceAllocation<ElementP *> ptr_P;
cutlass::DeviceAllocation<ElementV *> ptr_V;
cutlass::DeviceAllocation<ElementO *> ptr_O;
public:
//
// Methods
//
TestbedAttention(
Options &options_,
cutlass::Distribution::Kind init_Q_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_K_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_P_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_V_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_O_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3080
):
options(options_), init_Q(init_Q_), init_K(init_K_), init_P(init_P_), init_V(init_V_), init_O(init_O_), seed(seed_) { }
int problem_count() const {
return (options.head_number * options.batch_size);
}
private:
/// Helper to initialize a tensor view
template <typename Element>
void initialize_tensor_(
Element *ptr,
size_t capacity,
cutlass::Distribution::Kind dist_kind,
uint32_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
Element scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<ElementP>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 8;
scope_min = -8;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::device::BlockFillRandomUniform(
ptr, capacity, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::device::BlockFillRandomGaussian(
ptr, capacity, seed, Element(), Element(0.5f));
}
else if (dist_kind == cutlass::Distribution::Sequential) {
// Fill with increasing elements
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(1), Element());
}
else {
// Fill with all 1s
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(), Element(1));
}
}
/// Initializes data structures
void initialize_() {
//
    // Set scalars for the MHA example
//
options.alpha0 = 1.0f / sqrt(float(options.head_size));
options.alpha1 = 1.0f;
options.beta = 0;
//
// Choose random problem sizes
//
// construct a few problems of random sizes
srand(seed);
int64_t total_elements_Q = 0;
int64_t total_elements_K = 0;
int64_t total_elements_P = 0;
int64_t total_elements_V = 0;
int64_t total_elements_O = 0;
ldq_host.resize(problem_count());
ldk_host.resize(problem_count());
ldp_host.resize(problem_count());
ldv_host.resize(problem_count());
ldo_host.resize(problem_count());
seqlen_host.resize(problem_count());
// Create tensors in BMHK format, where
// B = batch_size
// M = sequence length
// H = num_heads
// K = embedding size per head
int64_t batch_offset_Q, batch_offset_K, batch_offset_V, batch_offset_O;
for (int32_t b = 0; b < options.batch_size; ++b) {
batch_offset_Q = total_elements_Q;
batch_offset_K = total_elements_K;
batch_offset_V = total_elements_V;
batch_offset_O = total_elements_O;
for (int32_t h = 0; h < options.head_number; ++h) {
int32_t i = h + b * options.head_number;
auto problem0 = options.problem_sizes0.at(i);
auto problem1 = options.problem_sizes1.at(i);
ldq_host.at(i) = LayoutQ::packed({problem0.m(), options.head_number * problem0.k()}).stride(0);
ldk_host.at(i) = LayoutK::packed({options.head_number * problem0.k(), problem0.n()}).stride(0);
ldp_host.at(i) = LayoutP::packed({problem0.m(), problem0.n()}).stride(0);
ldv_host.at(i) = LayoutV::packed({problem1.k(), options.head_number * problem1.n()}).stride(0);
ldo_host.at(i) = LayoutO::packed({problem1.m(), options.head_number * problem1.n()}).stride(0);
// m = n for attention problems.
seqlen_host.at(i) = problem0.m();
offset_Q.push_back(batch_offset_Q + h * problem0.k());
offset_K.push_back(batch_offset_K + h * problem0.k());
offset_P.push_back(total_elements_P);
offset_V.push_back(batch_offset_V + h * problem0.k());
offset_O.push_back(batch_offset_O + h * problem1.n());
int64_t elements_Q = problem0.m() * problem0.k();
int64_t elements_K = problem0.k() * problem0.n();
int64_t elements_P = problem0.m() * problem0.n();
int64_t elements_V = problem1.k() * problem1.n();
int64_t elements_O = problem1.m() * problem1.n();
total_elements_Q += elements_Q;
total_elements_K += elements_K;
total_elements_P += elements_P;
total_elements_V += elements_V;
total_elements_O += elements_O;
}
}
problem_sizes_device0.reset(problem_count());
problem_sizes_device1.reset(problem_count());
problem_sizes_device0.copy_from_host(options.problem_sizes0.data());
problem_sizes_device1.copy_from_host(options.problem_sizes1.data());
if (options.use_mask) {
problem_sizes_device0_real.reset(problem_count());
problem_sizes_device0_real.copy_from_host(options.problem_sizes0_real.data());
}
ldq.reset(problem_count());
ldk.reset(problem_count());
ldp.reset(problem_count());
ldv.reset(problem_count());
ldo.reset(problem_count());
seqlen.reset(problem_count());
ldq.copy_from_host(ldq_host.data());
ldk.copy_from_host(ldk_host.data());
ldp.copy_from_host(ldp_host.data());
ldv.copy_from_host(ldv_host.data());
ldo.copy_from_host(ldo_host.data());
seqlen.copy_from_host(seqlen_host.data());
//
// Assign pointers
//
block_Q.reset(total_elements_Q);
block_K.reset(total_elements_K);
block_P.reset(total_elements_P);
block_V.reset(total_elements_V);
block_O.reset(total_elements_O);
offset_P_Device.reset(problem_count());
// sync offset with device
cutlass::device_memory::copy_to_device(offset_P_Device.get(), offset_P.data(), offset_P.size());
std::vector<ElementQ *> ptr_Q_host(problem_count());
std::vector<ElementK *> ptr_K_host(problem_count());
std::vector<ElementP *> ptr_P_host(problem_count());
std::vector<ElementV *> ptr_V_host(problem_count());
std::vector<ElementO *> ptr_O_host(problem_count());
std::vector<ElementNorm *> ptr_norm_host(problem_count());
std::vector<ElementSum *> ptr_sum_host(problem_count());
for (int32_t i = 0; i < problem_count(); ++i) {
ptr_Q_host.at(i) = block_Q.get() + offset_Q.at(i);
ptr_K_host.at(i) = block_K.get() + offset_K.at(i);
ptr_P_host.at(i) = block_P.get() + offset_P.at(i);
ptr_V_host.at(i) = block_V.get() + offset_V.at(i);
ptr_O_host.at(i) = block_O.get() + offset_O.at(i);
}
ptr_Q.reset(problem_count());
ptr_Q.copy_from_host(ptr_Q_host.data());
ptr_K.reset(problem_count());
ptr_K.copy_from_host(ptr_K_host.data());
ptr_P.reset(problem_count());
ptr_P.copy_from_host(ptr_P_host.data());
ptr_V.reset(problem_count());
ptr_V.copy_from_host(ptr_V_host.data());
ptr_O.reset(problem_count());
ptr_O.copy_from_host(ptr_O_host.data());
//
// Initialize the problems of the workspace
//
initialize_tensor_(block_Q.get(), total_elements_Q, init_Q, seed + 1);
initialize_tensor_(block_K.get(), total_elements_K, init_K, seed + 2);
initialize_tensor_(block_V.get(), total_elements_V, init_V, seed + 3);
}
template<typename Element>
bool verify_tensor_(std::vector<Element> vector_Input, \
std::vector<Element> vector_Input_Ref,
int64_t verify_length = -1) {
int64_t size = (vector_Input.size() < vector_Input_Ref.size()) ? vector_Input.size() : vector_Input_Ref.size();
size = (verify_length == -1) ? size : verify_length;
// 0.05 for absolute error
float abs_tol = 5e-2f;
// 10% for relative error
float rel_tol = 1e-1f;
for (int64_t i = 0; i < size; ++i) {
float diff = (float)(vector_Input.at(i) - vector_Input_Ref.at(i));
float abs_diff = fabs(diff);
float abs_ref = fabs((float)vector_Input_Ref.at(i) + 1e-5f);
float relative_diff = abs_diff / abs_ref;
if ( (isnan(vector_Input_Ref.at(i)) || isnan(abs_diff) || isinf(abs_diff)) || (abs_diff > abs_tol && relative_diff > rel_tol)) {
printf("[%d/%d] diff = %f, rel_diff = %f, {computed=%f, ref=%f}.\n", int(i), int(size), abs_diff, relative_diff, (float)(vector_Input.at(i)), (float)(vector_Input_Ref.at(i)));
return false;
}
}
return true;
}
  /// Verifies the result against a host-side reference computation
bool verify_() {
bool passed = true;
for (int32_t b = 0; b < options.batch_size; ++b) {
int32_t i = b * options.head_number;
// Problem size is the same for all heads
cutlass::gemm::GemmCoord problem0 = options.problem_sizes0.at(b * options.head_number);
cutlass::gemm::GemmCoord problem1 = options.problem_sizes1.at(b * options.head_number);
MatrixCoord extent_Q{problem0.m(), problem0.k()};
MatrixCoord extent_K{problem0.k(), problem0.n()};
MatrixCoord extent_P{problem0.m(), problem0.n()};
MatrixCoord extent_V{problem1.k(), problem1.n()};
MatrixCoord extent_O{problem1.m(), problem1.n()};
LayoutO layout_O(ldo_host.at(i));
std::vector<ElementO> matrix_O(layout_O.capacity(extent_O));
cutlass::device_memory::copy_to_host(matrix_O.data(), block_O.get() + offset_O.at(i), matrix_O.size());
cutlass::DeviceAllocation<ElementO> block_Ref_O(layout_O.capacity(extent_O));
for (int32_t h = 0; h < options.head_number; ++h) {
i = h + b * options.head_number;
LayoutQ layout_Q(ldq_host.at(i));
LayoutK layout_K(ldk_host.at(i));
LayoutP layout_P(ldp_host.at(i));
LayoutV layout_V(ldv_host.at(i));
cutlass::TensorView<ElementQ, LayoutQ> view_Q(block_Q.get() + offset_Q.at(i), layout_Q, extent_Q);
cutlass::TensorView<ElementK, LayoutK> view_K(block_K.get() + offset_K.at(i), layout_K, extent_K);
cutlass::TensorView<ElementV, LayoutV> view_V(block_V.get() + offset_V.at(i), layout_V, extent_V);
cutlass::TensorView<ElementO, LayoutO> view_Ref_O_device(block_Ref_O.get() + offset_O.at(i) - offset_O.at(b * options.head_number), layout_O, extent_O);
cutlass::DeviceAllocation<ElementP> block_Ref_P(layout_P.capacity(extent_P));
cutlass::TensorView<ElementP, LayoutP> view_Ref_P_device(block_Ref_P.get(), layout_P, extent_P);
// Reference GEMM
cutlass::reference::device::GemmComplex<
ElementQ, LayoutQ,
ElementK, LayoutK,
ElementP, LayoutP,
ElementCompute, ElementAccumulator
>(
problem0,
ElementAccumulator(options.alpha0),
view_Q,
Attention::MM0::Mma::kTransformA,
view_K,
Attention::MM0::Mma::kTransformB,
ElementAccumulator(options.beta),
view_Ref_P_device,
view_Ref_P_device,
ElementAccumulator(0)
);
// Compute softmax for P. We need to explicitly compute softmax
// over P because softmax is fused to the second GEMM in the
// profiled implementation.
std::vector<ElementP> matrix_Ref(layout_P.capacity(extent_P));
cutlass::device_memory::copy_to_host(matrix_Ref.data(), block_Ref_P.get(), matrix_Ref.size());
cutlass::TensorView<ElementP, LayoutP> view_Ref_host(matrix_Ref.data(), layout_P, extent_P);
std::vector<ElementNorm> vector_Norm_Ref(problem0.m());
std::vector<ElementSum> vector_Sum_Ref(problem0.m());
int n_dim = options.use_mask ? options.problem_sizes0_real.at(i).n() : problem0.n();
// Compute softmax for reference matrix
for (int m = 0; m < problem0.m(); m++) {
int n_dim_row = n_dim;
if (options.causal) {
n_dim_row = std::min(m + 1, n_dim);
}
ElementSoftmaxCompute max = ElementSoftmaxCompute(view_Ref_host.ref().at({m, 0}));
for (int n = 1; n < n_dim_row; n++) {
max = std::max(max, ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})));
}
vector_Norm_Ref.at(m) = ElementNorm(max);
ElementSoftmaxCompute sum = ElementSoftmaxCompute();
for (int n = 0; n < n_dim_row; n++) {
sum += std::exp( ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})) - max );
}
ElementSoftmaxCompute inv_sum = ElementSoftmaxCompute(1.0f / sum);
vector_Sum_Ref.at(m) = ElementSum(inv_sum);
for (int n = 0; n < n_dim_row; n++) {
view_Ref_host.ref().at({m, n}) = ElementP(
std::exp( ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})) - max ) * inv_sum
);
}
// Mask out the rest of the attention matrix
for (int n = n_dim_row; n < n_dim; ++n) {
view_Ref_host.ref().at({m, n}) = ElementP(0);
}
}
// when not using mask, problem_real and problem share the same sizes
if (options.use_mask) {
for (int m = 0; m < problem0.m(); m++) {
for (int n = n_dim; n < problem0.n(); n++) {
view_Ref_host.ref().at({m, n}) = ElementP(0);
}
}
}
cutlass::device_memory::copy_to_device(block_Ref_P.get(), matrix_Ref.data(), matrix_Ref.size());
// Reference GEMM
cutlass::reference::device::GemmComplex<
ElementP, LayoutP,
ElementV, LayoutV,
ElementO, LayoutO,
ElementCompute, ElementAccumulator
>(
problem1,
ElementAccumulator(options.alpha1),
view_Ref_P_device,
Attention::MM0::Mma::kTransformA,
view_V,
Attention::MM0::Mma::kTransformB,
ElementAccumulator(options.beta),
view_Ref_O_device,
view_Ref_O_device,
ElementAccumulator(0)
);
}
// Copy to host memory
std::vector<ElementO> matrix_Ref_O(layout_O.capacity(extent_O));
cutlass::device_memory::copy_to_host(matrix_Ref_O.data(), block_Ref_O.get(), matrix_Ref_O.size());
// printf("Pb %d: \n Q=(offset=%d, ldq=%d)\n K=(offset=%d, ldk=%d)\n O=(offset=%d, ldo=%d)\n",
// int(i), int(offset_Q[i]), int(ldq_host[i]), int(offset_K[i]), int(ldk_host[i]), int(offset_O[i]), int(ldo_host[i]));
      bool verified_O = verify_tensor_<ElementO>(matrix_O, matrix_Ref_O);
passed = passed && verified_O;
if (!passed) {
std::cerr << "\n***\nError - problem " << i << " (batch " << b << ") failed the QA check\n***\n" << std::endl;
if (!verified_O) {
std::cout << "Final matrix output is incorrect" << std::endl;
}
return passed;
}
}
return passed;
}
public:
/// Executes a CUTLASS Attention kernel and measures runtime.
Result profile() {
Result result;
result.passed = false;
// Initialize the problem
initialize_();
typename Attention::Params p;
{ // set parameters
p.query_ptr = block_Q.get();
p.key_ptr = block_K.get();
p.value_ptr = block_V.get();
p.logsumexp_ptr = nullptr; // Only needed for bw
p.output_accum_ptr = nullptr;
if (Attention::kNeedsOutputAccumulatorBuffer) {
cudaMalloc(&p.output_accum_ptr, block_O.size() * sizeof(typename Attention::output_accum_t));
}
p.output_ptr = block_O.get();
// TODO: support arbitrary seq lengths
// if (cu_seqlens_q.has_value()) {
// p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr();
// p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr();
// }
p.scale = options.alpha0;
p.num_heads = options.head_number;
p.num_batches = options.batch_size;
p.head_dim = options.head_size;
p.head_dim_value = options.head_size_v;
p.num_queries = options.seq_length;
p.num_keys = options.seq_length_kv;
if (options.causal) {
p.custom_mask_type = Attention::CausalFromTopLeft;
}
// All tensors are in BMHK shapes
p.q_strideH = options.head_size;
p.k_strideH = options.head_size;
p.v_strideH = options.head_size_v;
p.q_strideM = int32_t(ldq_host[0]);
p.k_strideM = int32_t(ldk_host[0]);
p.v_strideM = int32_t(ldv_host[0]);
p.q_strideB = p.q_strideM * options.seq_length;
p.k_strideB = p.k_strideM * options.seq_length_kv;
p.v_strideB = p.v_strideM * options.seq_length_kv;
p.o_strideM = p.head_dim_value * p.num_heads;
}
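    // Worked example of the BMHK strides above (illustrative only, using the
    // default options head_number=12, head_size=64, seq_length=1024):
    //   q_strideH = 64                    // step between heads within one query row
    //   q_strideM = 12 * 64 = 768         // step between consecutive query rows
    //   q_strideB = 768 * 1024 = 786432   // step between consecutive batches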
// launch kernel :)
constexpr auto kernel_fn = attention_kernel_batched_impl<Attention>;
int smem_bytes = sizeof(typename Attention::SharedStorage);
if (smem_bytes > 0xc000) {
cudaFuncSetAttribute(kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes);
}
if (!Attention::check_supported(p)) {
std::cerr << "Kernel does not support these inputs" << std::endl;
return result;
}
kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p);
// Wait for completion
result.error = cudaDeviceSynchronize();
if (result.error != cudaSuccess) {
std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error);
return result;
}
//
// Verify correctness
//
result.passed = true;
if (options.reference_check) {
result.passed = verify_();
}
//
// Warm-up run
//
    kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p);
    result.error = cudaGetLastError();
    if (result.error != cudaSuccess) {
      std::cerr << "Failed to launch CUTLASS Attention kernel: " << cudaGetErrorString(result.error) << std::endl;
      return result;
    }
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
        return result;
}
}
// Record an event at the start of a series of GEMM operations
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p);
}
//
// Stop profiling loop
//
// Record an event when the GEMM operations have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
//
// Cleanup
//
for (auto event : events) {
(void)cudaEventDestroy(event);
}
std::cout << std::endl;
std::cout << "CUTLASS Attention:\n"
<< "====================================================" << std::endl;
std::cout << " " << " {seq length Q, seq length KV, head size, head size V, head number, batch size} = {" << options.seq_length \
<< ", " << options.seq_length_kv << ", " << options.head_size << ", " << options.head_size_v << ", " << options.head_number\
<< ", " << options.batch_size << "}." << std::endl;
std::cout << std::endl;
std::cout << " " << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " " << "GFLOPs: " << result.gflops << std::endl;
return result;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <
int kQueriesPerBlock,
int kKeysPerBlock,
int kMaxK
>
int run_attention(Options& options) {
using Attention = AttentionKernel<
cutlass::half_t, // scalar_t
cutlass::arch::Sm80, // ArchTag
true, // Memory is aligned
kQueriesPerBlock,
kKeysPerBlock,
kMaxK,
false, // Supports dropout
false // Supports bias
>;
//
// Test and profile
//
TestbedAttention<Attention> testbed(options);
Result result = testbed.profile();
if (!result.passed) {
std::cout << "Profiling CUTLASS attention has failed.\n";
std::cout << "\nFailed\n";
return -1;
}
std::cout << "\nPassed\n";
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
//
// This example uses mma.sync to directly access Tensor Cores to achieve peak performance.
//
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (__CUDACC_VER_MAJOR__ < 11 || props.major < 8) {
//
// This example requires an NVIDIA Ampere-architecture GPU.
//
std::cout
<< "CUTLASS's CUTLASS Attention example requires a GPU of NVIDIA's Ampere Architecture or "
<< "later (compute capability 80 or greater).\n";
return 0;
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.error) {
std::cerr << "Aborting execution." << std::endl;
return -1;
}
if (options.use_mask) {
std::cerr << "--use_mask is not supported at the moment\n";
return -2;
}
if (options.alignment != 1) {
std::cerr << "--alignment=1 is the only supported value\n";
return -2;
}
  // Determine the kernel configuration based on the value head size (head_size_v).
  // If the value head size is less than or equal to 64, each block operates over 64 queries and
  // 64 keys, and partial results can be stored in the register file.
  // If the value head size is greater than 64, each block operates over 32 queries and 128 keys,
  // and partial results are stored in shared memory.
if (options.head_size_v > 64) {
static int const kQueriesPerBlock = 32;
static int const kKeysPerBlock = 128;
if (options.head_size_v <= 128) {
return run_attention<kQueriesPerBlock, kKeysPerBlock, 128>(options);
} else {
return run_attention<kQueriesPerBlock, kKeysPerBlock, 65536>(options);
}
} else {
static constexpr int kMaxK = 64; // <- Decrease to 32/16 if your problem is smaller
static int const kQueriesPerBlock = 64;
static int const kKeysPerBlock = 64;
return run_attention<kQueriesPerBlock, kKeysPerBlock, kMaxK>(options);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/41_fused_multi_head_attention/fused_multihead_attention_fixed_seqlen.cu/0 | {
"file_path": "examples/41_fused_multi_head_attention/fused_multihead_attention_fixed_seqlen.cu",
"repo_id": "examples",
"token_count": 15573
} | 6 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Inspired by
    "cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h". Loads tiles of GEMM
    operands from a RowMajor shared-memory layout into registers for use by A100
    TensorCores.
    The differences with "mma_tensor_op_tile_access_iterator.h" are that:
    (1) We use "ldmatrix" to load tiles, rather than manual loads (slightly
    faster).
    (2) We support transposing the operand (e.g. reading `A.transpose()` when
    the shared memory holds `A`).
    This is only implemented for specific shapes.
*/
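// A minimal usage sketch (illustrative only: the MatrixShape instruction shape,
// the shared-memory TensorRef and the lane id below are assumptions, not taken
// from an actual caller of this header):
//
//   using Iterator = cutlass::gemm::warp::WarpIteratorFromSmem<
//       cutlass::gemm::Operand::kA, cutlass::half_t,
//       cutlass::MatrixShape<16, 16>>;
//
//   Iterator iter(smem_ref_A, lane_id);  // smem_ref_A: TensorRef into shared memory
//   typename Iterator::Fragment frag;
//   CUTLASS_PRAGMA_UNROLL
//   for (int k = 0; k < Iterator::kIterations; ++k) {
//     iter.load(frag);                   // ldmatrix-based load of one k-slice
//     ++iter;                            // advance to the next k-slice of the 32x32 tile
//   }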
#pragma once
#include <cutlass/gemm/gemm.h>
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
template <
/// Operand identity
Operand Operand_,
/// Data type of A elements
typename Element_,
typename InstructionShape_,
bool kTranspose = false>
class WarpIteratorFromSmem {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = cutlass::MatrixShape<32, 32>;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(
kOperand == Operand::kA,
"No support for OperandB at the moment");
/// Basic check
static_assert(
kOperand == Operand::kA || kOperand == Operand::kB,
"WarpIteratorFromSmem may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
static_assert(sizeof_bits<Element>::value == 16, "Only supported for half");
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
static_assert(InstructionShape::kRow == 16, "Only supports 16x8x8 / 16x8x16");
static_assert(
InstructionShape::kColumn == 8 || InstructionShape::kColumn == 16,
"Only supports 16x8x8 / 16x8x16");
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = 1;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Number of elements accessed per Shared Memory load
static int const kElementsPerAccess =
(sizeof_bits<Element>::value >= 32 ? 1
: 32 / sizeof_bits<Element>::value);
using InstructionCount = MatrixShape<
Shape::kRow / InstructionShape::kRow,
Shape::kColumn / InstructionShape::kColumn>;
static int const kIterations = (kOperand == Operand::kA)
? InstructionCount::kColumn
: InstructionCount::kRow;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<
Element,
(kOperand == Operand::kA)
? (Shape::kRow* InstructionShape::kColumn / kThreads)
: (Shape::kColumn* InstructionShape::kRow / kThreads)>;
/// Memory access type
// using AccessType = AlignedArray<Element, kElementsPerAccess>;
using AccessType = Array<unsigned, 4>;
static int constexpr kWarpShapeDivisibleInner =
(kOperand == Operand::kA ? InstructionShape::kColumn
: InstructionShape::kRow);
static int constexpr kAccessesInner =
(kWarpShapeDivisibleInner / kElementsPerAccess) / 4;
  // Number of 32-bit tiles to load per `ldmatrix`
static int const kTilesPerInstruction = InstructionShape::kRow / 8;
static_assert(kTilesPerInstruction == 2, "Only supports 16x8x16 and 16x8x8");
private:
/// Underlying tensor reference
TensorRef ref_;
/// Origin
MatrixCoord origin_;
/// Iterations in a tile
int iterations_;
public:
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
WarpIteratorFromSmem(TensorRef const& ref, int lane_id)
: WarpIteratorFromSmem(ref, {Shape::kRow, Shape::kColumn}, lane_id) {}
CUTLASS_HOST_DEVICE
WarpIteratorFromSmem(TensorRef const& ref, TensorCoord extent, int lane_id)
: ref_(ref), iterations_(0) {
// See also:
// https://docs.nvidia.com/cuda/archive/11.7.1/parallel-thread-execution/index.html#warp-level-matrix-fragment-mma-1688
// 16x8x8: kAccessesInner = 1 (1 ldmatrix.x4)
// 16x8x16: kAccessesInner = 2 (2 ldmatrix.x4)
int ldsm_vec_num = (lane_id >> 3);
if (kOperand == Operand::kA) {
origin_ = MatrixCoord(lane_id % 8, 0);
static_assert(
InstructionCount::kRow * kTilesPerInstruction == 4,
"can't use ldmatrix.x4");
int access_m_idx = ldsm_vec_num % kTilesPerInstruction;
int inner_idx = (ldsm_vec_num / kTilesPerInstruction) % kAccessesInner;
int inst_m_idx = ldsm_vec_num / (kTilesPerInstruction * kAccessesInner);
MatrixCoord offset(
access_m_idx * 8 + inst_m_idx * InstructionShape::kRow,
inner_idx * 4 * kElementsPerAccess);
if (kTranspose) {
offset = MatrixCoord(offset.column(), offset.row());
}
origin_ += offset;
} else {
// Note: This is not tested or used
origin_ = MatrixCoord(0, lane_id % 8);
static_assert(InstructionCount::kColumn * kAccessesInner == 4, "");
CUTLASS_PRAGMA_UNROLL
for (int inst_n_idx = 0; inst_n_idx < InstructionCount::kColumn;
++inst_n_idx) {
CUTLASS_PRAGMA_UNROLL
for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
int access_idx = inner_idx + kAccessesInner * inst_n_idx;
MatrixCoord offset(
inner_idx * 4 * kElementsPerAccess, inst_n_idx * 8);
if (access_idx == ldsm_vec_num) {
if (kTranspose) {
offset = MatrixCoord(offset.column(), offset.row());
}
origin_ += offset;
}
}
}
}
ref_.add_coord_offset(origin_);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
WarpIteratorFromSmem& add_tile_offset(TensorCoord const& tile_offset) {
TensorCoord coord_offset(
tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
if (kTranspose) {
coord_offset = TensorCoord{coord_offset.column(), coord_offset.row()};
}
origin_ += coord_offset;
ref_.add_coord_offset(coord_offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
void advance() {
if (kOperand == Operand::kA) {
add_tile_offset({0, 1});
} else {
add_tile_offset({1, 0});
}
iterations_ = 0;
}
/// increase iterations in a tile
CUTLASS_HOST_DEVICE
WarpIteratorFromSmem& operator++() {
iterations_++;
if (iterations_ >= kIterations)
advance();
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_DEVICE
void load(Fragment& frag) const {
AccessType* access_ptr = reinterpret_cast<AccessType*>(&frag);
using LoadLayout = typename platform::
conditional<kTranspose, layout::ColumnMajor, layout::RowMajor>::type;
CUTLASS_PRAGMA_UNROLL
for (int access_m_idx = 0; access_m_idx <
(InstructionCount::kRow * kTilesPerInstruction * kAccessesInner) / 4;
++access_m_idx) {
MatrixCoord offset;
if (kOperand == Operand::kA) {
offset = MatrixCoord(
access_m_idx * 16, iterations_ * InstructionShape::kColumn);
} else {
offset = MatrixCoord(iterations_ * InstructionShape::kRow, 0);
}
if (kTranspose) {
offset = MatrixCoord(offset.column(), offset.row());
}
cutlass::arch::ldsm<LoadLayout, 4>(
access_ptr[access_m_idx], ref_.data() + ref_.offset(offset));
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| examples/41_fused_multi_head_attention/iterators/warp_iterator_from_smem.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/iterators/warp_iterator_from_smem.h",
"repo_id": "examples",
"token_count": 3593
} | 7 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_conversion.h"
namespace cutlass {
namespace gemm {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Size of the accumulation tile shape (concept: MatrixShape)
typename AccumulatorShape_,
/// KBlocks columns to compute residual
int KBlocksColumn_,
/// Accumulator Element type
typename ElementAccumulator_,
/// Element type
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Whether beta is zero
bool IsBetaZero_ >
class MmaTensorOpPureFragmentIterator;
// Partial specialization for a column-major accumulator tile
// where the Element type is the same as the Accumulator Element type
template <
/// Shape of warp tile to load (concept: MatrixShape)
typename Shape_,
/// Shape of the warp accumulation tile (concept: MatrixShape)
typename AccumulatorShape_,
/// KBlocks columns to compute residual
int KBlocksColumn_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_>
class MmaTensorOpPureFragmentIterator<Shape_, AccumulatorShape_, KBlocksColumn_, Element_, Element_,
cutlass::layout::ColumnMajor,
InstructionShape_, true> {
public:
/// Shape of warp tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Shape of the warp accumulation tile (concept: MatrixShape)
using AccumulatorShape = AccumulatorShape_;
/// KBlocks columns to compute residual
static int const kKBlockColumn = KBlocksColumn_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Whether beta is zero
static bool const IsBetaZero = true;
/// Number of participating threads
static int const kThreads = 32;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(
!(AccumulatorShape::kRow % Shape::kRow) &&
!(AccumulatorShape::kColumn % Shape::kColumn),
"Shape of Warp Accumulator must be divisible by warp shape.");
static_assert(
!(kKBlockColumn % Shape::kColumn),
"KBlock size must be divisible by warp shape.");
/// Number of times this iterator can be incremented
static int const kIterations = AccumulatorShape::kCount / Shape::kCount;
};
private:
static int const kElementsPerAccess = InstructionShape::kM * InstructionShape::kN / kThreads;
/// Number of mma operations performed by a warp
using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM,
Shape::kColumn / InstructionShape::kN>;
/// Number of mma operations performed by the entire accumulator
using AccumulatorIterations = MatrixShape<AccumulatorShape::kRow / InstructionShape::kM,
AccumulatorShape::kColumn / InstructionShape::kN>;
/// Number of K iterations
static int const kKBlockIterations = (AccumulatorShape::kColumn + kKBlockColumn - 1) / kKBlockColumn;
static int const kResidualColumn = AccumulatorShape::kColumn - (kKBlockIterations - 1) * kKBlockColumn;
static int const kKBlockColumnIterations = kKBlockColumn / Shape::kColumn
* (AccumulatorShape::kRow / Shape::kRow);
static int const kResidualIndex = kResidualColumn / Shape::kColumn
* (AccumulatorShape::kRow / Shape::kRow);
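  // Illustrative arithmetic (assumed example values, not from a specific kernel):
  // with AccumulatorShape = MatrixShape<64, 96>, Shape = MatrixShape<64, 32>, and
  // kKBlockColumn = 64:
  //   kKBlockIterations       = (96 + 64 - 1) / 64    = 2
  //   kResidualColumn         = 96 - (2 - 1) * 64     = 32
  //   kKBlockColumnIterations = (64 / 32) * (64 / 64) = 2
  //   kResidualIndex          = (32 / 32) * (64 / 64) = 1
  // i.e. the residual (partial) K block spans one Shape-sized column and is
  // visited first (see is_residual_tile_ and add_offset() below).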
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<Element, Shape::kCount / kThreads>;
/// Accumulator Fragment object
using AccumulatorFragment = Array<Element, AccumulatorShape::kCount / kThreads>;
private:
/// Internal access type
using AccessType = Array<Element, kElementsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
/// Used to access residual tile first
bool is_residual_tile_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
MmaTensorOpPureFragmentIterator(AccumulatorFragment const &accum)
: accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0), is_residual_tile_(true) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
index_ += index_offset;
if(is_residual_tile_ && index_ >= kKBlockColumnIterations) {
index_ = index_ - kKBlockColumnIterations + kResidualIndex;
is_residual_tile_ = false;
}
}
/// Increments
CUTLASS_HOST_DEVICE
MmaTensorOpPureFragmentIterator &operator++() {
add_offset(1);
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
MmaTensorOpPureFragmentIterator &operator--() {
add_offset(-1);
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
AccessType src_fragment;
src_fragment.clear();
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
int index_m = (index_ * MmaIterations::kRow) % AccumulatorIterations::kRow;
int index_n = (index_ * MmaIterations::kRow) / AccumulatorIterations::kRow
* MmaIterations::kColumn;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; n++) {
for (int m = 0; m < MmaIterations::kRow; m++) {
int accumulator_access_offset =
(n + index_n) * AccumulatorIterations::kRow + m + index_m;
frag_ptr[n * MmaIterations::kRow + m].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[n * MmaIterations::kRow + m] = accumulators_[accumulator_access_offset];
// frag_ptr[n * MmaIterations::kRow + m] = output_op(accumulators_[accumulator_access_offset], src_fragment);
}
}
}
};
// Partial specialization for row-major accumulator tile
template <
/// Shape of warp tile to load (concept: MatrixShape)
typename Shape_,
/// Shape of the warp accumulation tile (concept: MatrixShape)
typename AccumulatorShape_,
/// KBlocks columns to compute residual
int KBlocksColumn_,
/// Accumulator Element type
typename ElementAccumulator_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_>
class MmaTensorOpPureFragmentIterator<Shape_, AccumulatorShape_, KBlocksColumn_, ElementAccumulator_, Element_,
cutlass::layout::RowMajor,
InstructionShape_, true> {
public:
/// Shape of warp tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Shape of the warp accumulation tile (concept: MatrixShape)
using AccumulatorShape = AccumulatorShape_;
/// KBlocks columns to compute residual
static int const kKBlockColumn = KBlocksColumn_;
/// Accumulator Element type
using ElementAccumulator = ElementAccumulator_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Whether beta is zero
static bool const IsBetaZero = true;
/// Number of participating threads
static int const kThreads = 32;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(
!(AccumulatorShape::kRow % Shape::kRow) &&
!(AccumulatorShape::kColumn % Shape::kColumn),
"Shape of Warp Accumulator must be divisible by warp shape.");
static_assert(
!(kKBlockColumn % Shape::kColumn),
"KBlock size must be divisible by warp shape.");
/// Number of times this iterator can be incremented
static int const kIterations = AccumulatorShape::kCount / Shape::kCount;
};
private:
static int const kElementsPerAccess = InstructionShape::kM * InstructionShape::kN / kThreads;
/// Number of mma operations performed by a warp
using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM,
Shape::kColumn / InstructionShape::kN>;
/// Number of mma operations performed by the entire accumulator
using AccumulatorIterations = MatrixShape<AccumulatorShape::kRow / InstructionShape::kM,
AccumulatorShape::kColumn / InstructionShape::kN>;
/// Number of K iterations
static int const kKBlockIterations = (AccumulatorShape::kColumn + kKBlockColumn - 1) / kKBlockColumn;
static int const kResidualColumn = AccumulatorShape::kColumn - (kKBlockIterations - 1) * kKBlockColumn;
static int const kKBlockColumnIterations = kKBlockColumn / Shape::kColumn
* (AccumulatorShape::kRow / Shape::kRow);
static int const kResidualIndex = kResidualColumn / Shape::kColumn
* (AccumulatorShape::kRow / Shape::kRow);
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<Element, Shape::kCount / kThreads>;
/// Accumulator Fragment object
using AccumulatorFragment = Array<ElementAccumulator, AccumulatorShape::kCount / kThreads>;
private:
/// Internal access type
using AccessType = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentAccessType = Array<Element, kElementsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
/// Used to access residual tile first
bool is_residual_tile_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
MmaTensorOpPureFragmentIterator(AccumulatorFragment const &accum)
: accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0), is_residual_tile_(true) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
index_ += index_offset;
if(is_residual_tile_ && index_ >= kKBlockColumnIterations) {
index_ = index_ - kKBlockColumnIterations + kResidualIndex;
is_residual_tile_ = false;
}
}
/// Increments
CUTLASS_HOST_DEVICE
MmaTensorOpPureFragmentIterator &operator++() {
add_offset(1);
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
MmaTensorOpPureFragmentIterator &operator--() {
add_offset(-1);
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
FragmentAccessType src_fragment;
src_fragment.clear();
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
int index_m = (index_ * MmaIterations::kRow) % AccumulatorIterations::kRow;
int index_n = (index_ * MmaIterations::kRow) / AccumulatorIterations::kRow
* MmaIterations::kColumn;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; m++) {
for (int n = 0; n < MmaIterations::kColumn; n++) {
int accumulator_access_offset =
(m + index_m) * AccumulatorIterations::kColumn + n + index_n;
frag_ptr[m * MmaIterations::kColumn + n].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[m * MmaIterations::kColumn + n] = (accumulators_[accumulator_access_offset]);
}
}
}
};
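// A minimal usage sketch of the iterators above (illustrative only: the template
// arguments, `accum` and the consuming warp-level MMA are assumptions):
//
//   using FragmentIterator = MmaTensorOpPureFragmentIterator<
//       MatrixShape<64, 64>,          // warp tile to load
//       MatrixShape<64, 256>,         // warp accumulator tile
//       64,                           // K-block columns
//       cutlass::half_t,              // accumulator element
//       cutlass::half_t,              // element
//       cutlass::layout::RowMajor,
//       cutlass::gemm::GemmShape<16, 8, 16>, true>;
//
//   FragmentIterator frag_iter(accum);  // accum: FragmentIterator::AccumulatorFragment
//   typename FragmentIterator::Fragment frag;
//   for (int i = 0; i < FragmentIterator::Policy::kIterations; ++i) {
//     frag_iter.load(frag);
//     ++frag_iter;
//     // ... feed `frag` to a warp-level MMA as an operand fragment ...
//   }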
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| examples/44_multi_gemm_ir_and_codegen/fixed_impl/gemm/warp/mma_tensor_op_fragment_iterator_without_output_op.h/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/fixed_impl/gemm/warp/mma_tensor_op_fragment_iterator_without_output_op.h",
"repo_id": "examples",
"token_count": 5122
} | 8 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Simple Hopper GEMM example using CUTLASS 3.0 APIs for NVIDIA Hopper architecture
    This example demonstrates a simple way to instantiate and run a TF32 GEMM using the new CUTLASS 3.0
APIs on NVIDIA Hopper architecture. New features that will be showcased in this example are as follows:
1. NVIDIA Hopper architecture introduces a new series of tensor core instructions (GMMA)
which are more efficient than the Ampere tensor core instructions.
2. NVIDIA Hopper architecture includes new Tensor Memory Accelerator (TMA) unit to transfer large
blocks of data efficiently between global memory and shared memory. TMA also supports asynchronous
copies between thread blocks in a cluster. Another advantage is that TMA can load in FP32 data and
convert them implicitly to TF32.
3. This example uses the Warp Specialized kernel design (see /media/docs/efficient_gemm.md for details).
4. A simple way to tune the CTA rasterization direction and swizzle pattern of Hopper kernels. Both the
       CTA rasterization direction and swizzle pattern impact cross-CTA locality of accesses. By tuning them, we can
improve performance.
Examples:
$ ./examples/48_hopper_warp_specialized_gemm/48_hopper_warp_specialized_gemm --m=2048 --n=2048 --k=2048 --rasterization=N --swizzle=2
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cutlass/tensor_ref.h"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/gemm/kernel/tile_scheduler_params.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/packed_stride.hpp"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "helper.h"
using namespace cute;
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM kernel configurations
/////////////////////////////////////////////////////////////////////////////////////////////////
// A matrix configuration
using ElementA = float; // Element type for A matrix operand
using LayoutA = cutlass::layout::RowMajor; // Layout type for A matrix operand
constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value; // Memory access granularity/alignment of A matrix in units of elements (up to 16 bytes)
// B matrix configuration
using ElementB = float; // Element type for B matrix operand
using LayoutB = cutlass::layout::ColumnMajor; // Layout type for B matrix operand
constexpr int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value; // Memory access granularity/alignment of B matrix in units of elements (up to 16 bytes)
// C/D matrix configuration
using ElementC = float; // Element type for C and D matrix operands
using LayoutC = cutlass::layout::ColumnMajor; // Layout type for C and D matrix operands
constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value; // Memory access granularity/alignment of C matrix in units of elements (up to 16 bytes)
// Core kernel configurations
using ElementAccumulator = float; // Element type for internal accumulation
using ArchTag = cutlass::arch::Sm90; // Tag indicating the minimum SM that supports the intended feature
using OperatorClass = cutlass::arch::OpClassTensorOp; // Operator class tag
using TileShape = Shape<_128,_128,_32>; // Threadblock-level tile size
using ClusterShape = Shape<_1,_2,_1>; // Shape of the threadblocks in a cluster
using StageCountType = cutlass::gemm::collective::StageCountAuto; // Stage count maximized based on the tile size
using KernelSchedule = cutlass::gemm::collective::KernelScheduleAuto; // Kernel to launch based on the default setting in the Collective Builder
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape, ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator, ElementAccumulator,
ElementC, LayoutC, AlignmentC,
ElementC, LayoutC, AlignmentC,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
ArchTag, OperatorClass,
ElementA, LayoutA, AlignmentA,
ElementB, LayoutB, AlignmentB,
ElementAccumulator,
TileShape, ClusterShape,
cutlass::gemm::collective::StageCountAutoCarveout<
static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int>, // Indicates ProblemShape
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
// Reference device GEMM implementation type
using DeviceGemmReference = cutlass::reference::device::Gemm<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
ElementAccumulator>;
using StrideA = typename Gemm::GemmKernel::StrideA;
using StrideB = typename Gemm::GemmKernel::StrideB;
using StrideC = typename Gemm::GemmKernel::StrideC;
using StrideD = typename Gemm::GemmKernel::StrideD;
//
// Data members
//
/// Initialization
StrideA stride_A;
StrideB stride_B;
StrideC stride_C;
StrideD stride_D;
uint64_t seed;
cutlass::DeviceAllocation<typename Gemm::ElementA> block_A;
cutlass::DeviceAllocation<typename Gemm::ElementB> block_B;
cutlass::DeviceAllocation<typename Gemm::ElementC> block_C;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput> block_D;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput> block_ref_D;
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Testbed utility types
/////////////////////////////////////////////////////////////////////////////////////////////////
using RasterOrderOptions = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90Params::RasterOrderOptions;
// Command line options parsing
struct Options {
bool help;
float alpha, beta;
int iterations;
int m, n, k;
RasterOrderOptions raster;
int swizzle;
Options():
help(false),
m(5120), n(4096), k(4096),
alpha(1.f), beta(0.f),
iterations(1000),
raster(RasterOrderOptions::Heuristic),
swizzle(1)
{ }
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
return;
}
cmd.get_cmd_line_argument("m", m);
cmd.get_cmd_line_argument("n", n);
cmd.get_cmd_line_argument("k", k);
cmd.get_cmd_line_argument("alpha", alpha, 1.f);
cmd.get_cmd_line_argument("beta", beta, 0.f);
cmd.get_cmd_line_argument("iterations", iterations);
char raster_char;
cmd.get_cmd_line_argument("raster", raster_char);
if (raster_char == 'N' || raster_char == 'n') {
raster = RasterOrderOptions::AlongN;
}
else if (raster_char == 'M' || raster_char == 'm') {
raster = RasterOrderOptions::AlongM;
}
else if (raster_char == 'H' || raster_char == 'h') {
raster = RasterOrderOptions::Heuristic;
}
cmd.get_cmd_line_argument("swizzle", swizzle, 1);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "48_hopper_warp_specialized_gemm\n\n"
<< " Hopper FP32 GEMM using a Warp Specialized kernel.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement\n\n"
<< " --m=<int> Sets the M extent of the GEMM\n"
<< " --n=<int> Sets the N extent of the GEMM\n"
<< " --k=<int> Sets the K extent of the GEMM\n"
<< " --alpha=<f32> Epilogue scalar alpha\n"
<< " --beta=<f32> Epilogue scalar beta\n\n"
<< " --raster=<char> CTA Rasterization direction (N for along N, M for along M, and H for heuristic)\n\n"
<< " --swizzle=<int> CTA Rasterization swizzle\n\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n\n";
out
<< "\n\nExamples:\n\n"
<< "$ " << "48_hopper_warp_specialized_gemm" << " --m=1024 --n=512 --k=1024 --alpha=2 --beta=0.707 \n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const
{
// Two flops per multiply-add
uint64_t flop = uint64_t(2) * m * n * k;
double gflop = double(flop) / double(1.0e9);
return gflop / runtime_s;
}
};
/// Result structure
struct Result
{
double avg_runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
Result(
double avg_runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess)
:
avg_runtime_ms(avg_runtime_ms), gflops(gflops), status(status), error(error), passed(false)
{}
};
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM setup and evaluation
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to initialize a block of device data
template <class Element>
bool initialize_block(
cutlass::DeviceAllocation<Element>& block,
uint64_t seed=2023) {
Element scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::device::BlockFillRandomUniform(
block.get(), block.size(), seed, scope_max, scope_min, 0);
return true;
}
/// Initialize operands to be used in the GEMM and reference GEMM
void initialize(const Options &options) {
stride_A = cutlass::make_cute_packed_stride(StrideA{}, {options.m, options.k, 1});
stride_B = cutlass::make_cute_packed_stride(StrideB{}, {options.n, options.k, 1});
stride_C = cutlass::make_cute_packed_stride(StrideC{}, {options.m, options.n, 1});
stride_D = cutlass::make_cute_packed_stride(StrideD{}, {options.m, options.n, 1});
block_A.reset(options.m * options.k);
block_B.reset(options.k * options.n);
block_C.reset(options.m * options.n);
block_D.reset(options.m * options.n);
block_ref_D.reset(options.m * options.n);
initialize_block(block_A, seed + 2023);
initialize_block(block_B, seed + 2022);
initialize_block(block_C, seed + 2021);
}
/// Populates a Gemm::Arguments structure from the given commandline options
typename Gemm::Arguments args_from_options(const Options &options)
{
typename Gemm::Arguments arguments{
cutlass::gemm::GemmUniversalMode::kGemm,
{options.m, options.n, options.k},
{block_A.get(), stride_A, block_B.get(), stride_B},
{{options.alpha, options.beta}, block_C.get(), stride_C, block_D.get(), stride_D}
};
arguments.scheduler.raster_order = options.raster;
  // The tile scheduler rounds the requested swizzle to the nearest supported power of two, up to 8 (i.e., 1, 2, 4, or 8)
arguments.scheduler.max_swizzle_size = options.swizzle;
return arguments;
}
bool verify(const Options &options) {
cutlass::TensorRef ref_A(block_A.get(), Gemm::LayoutA::packed({options.m, options.k}));
cutlass::TensorRef ref_B(block_B.get(), Gemm::LayoutB::packed({options.k, options.n}));
cutlass::TensorRef ref_C(block_C.get(), Gemm::LayoutC::packed({options.m, options.n}));
cutlass::TensorRef ref_D(block_ref_D.get(), Gemm::LayoutD::packed({options.m, options.n}));
//
// Compute reference output
//
// Create instantiation for device reference gemm kernel
DeviceGemmReference gemm_reference;
// Launch device reference gemm kernel
gemm_reference(
{options.m, options.n, options.k},
ElementAccumulator(options.alpha),
ref_A,
ref_B,
ElementAccumulator(options.beta),
ref_C,
ref_D);
// Wait for kernel to finish
CUDA_CHECK(cudaDeviceSynchronize());
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::device::BlockCompareEqual(block_ref_D.get(), block_D.get(), block_D.size());
return passed;
}
/// Execute a given example GEMM computation
template <typename Gemm>
int run(Options &options)
{
initialize(options);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm;
// Create a structure of gemm kernel arguments suitable for invoking an instance of Gemm
auto arguments = args_from_options(options);
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Check if the problem size is supported or not
CUTLASS_CHECK(gemm.can_implement(arguments));
// Initialize CUTLASS kernel with arguments and workspace pointer
CUTLASS_CHECK(gemm.initialize(arguments, workspace.get()));
// Correctness / Warmup iteration
CUTLASS_CHECK(gemm.run());
// Check if output from CUTLASS kernel and reference kernel are equal or not
Result result;
result.passed = verify(options);
std::cout << " Disposition: " << (result.passed ? "Passed" : "Failed") << std::endl;
if (!result.passed) {
exit(-1);
}
// Run profiling loop
if (options.iterations > 0)
{
GpuTimer timer;
timer.start();
for (int iter = 0; iter < options.iterations; ++iter) {
CUTLASS_CHECK(gemm.initialize(arguments, workspace.get()));
CUTLASS_CHECK(gemm.run());
}
timer.stop();
// Compute average runtime and GFLOPs.
float elapsed_ms = timer.elapsed_millis();
result.avg_runtime_ms = double(elapsed_ms) / double(options.iterations);
result.gflops = options.gflops(result.avg_runtime_ms / 1000.0);
std::string raster = "Heuristic";
if (options.raster == RasterOrderOptions::AlongN) {
raster = "Along N";
}
else if (options.raster == RasterOrderOptions::AlongM) {
raster = "Along M";
}
std::cout << " Problem Size: " << options.m << 'x' << options.n << 'x' << options.k << std::endl;
std::cout << " Rasterization: " << raster << " with a maximum CTA swizzle of " << options.swizzle << std::endl;
std::cout << " Avg runtime: " << result.avg_runtime_ms << " ms" << std::endl;
std::cout << " GFLOPS: " << result.gflops << std::endl;
}
return 0;
}
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
  // This example must be compiled with the CUDA 12.0 Toolkit (or newer)
  // and requires a GPU with compute capability at least 90.
if (__CUDACC_VER_MAJOR__ < 12) {
std::cerr << "This example requires CUDA 12 or newer.\n";
    // Returning zero so this test passes on older Toolkits. Its actions are a no-op.
return 0;
}
cudaDeviceProp props;
int current_device_id;
CUDA_CHECK(cudaGetDevice(¤t_device_id));
CUDA_CHECK(cudaGetDeviceProperties(&props, current_device_id));
if (props.major < 9) {
std::cerr
<< "This example requires a GPU of NVIDIA's Hopper Architecture or "
<< "later (compute capability 90 or greater).\n";
return 0;
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
//
// Evaluate CUTLASS kernels
//
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
run<Gemm>(options);
#endif
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/48_hopper_warp_specialized_gemm/48_hopper_warp_specialized_gemm.cu/0 | {
"file_path": "examples/48_hopper_warp_specialized_gemm/48_hopper_warp_specialized_gemm.cu",
"repo_id": "examples",
"token_count": 6694
} | 9 |
<jupyter_start><jupyter_text>Basic example of using the CUTLASS Python interface. This notebook walks through a basic example of using the CUTLASS Python interface to declare, compile, and run GEMMs.[](https://colab.research.google.com/github/NVIDIA/cutlass/blob/main/examples/python/00_basic_gemm.ipynb) Prerequisites for running on Colab. This notebook requires an NVIDIA GPU. If `nvidia-smi` fails, go to Runtime -> Change runtime type -> Hardware accelerator and confirm a GPU is selected.<jupyter_code>!#nvidia-smi<jupyter_output><empty_output><jupyter_text>If running on Colab, you will need to install the CUTLASS Python interface. To do so, uncomment the following line and run the cell:<jupyter_code>!#pip install nvidia-cutlass<jupyter_output><empty_output><jupyter_text>General setup. We first import various packages needed for the example and construct the input and output tensors that will be used in our example.<jupyter_code>import numpy as np
import random
import cutlass
# This controls whether the C++ GEMM declaration will be printed at each step.
# Set to `False` to omit this information.
print_module = True
m = 128
n = m
k = m
dtype = np.float16
type_A = np.float16
type_B = np.float16
type_C = np.float16
type_D = np.float16
np.random.seed(1234)
random.seed(1234)
scope_min = -4
scope_max = 4
tensor_A = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, k)).astype(type_A))
tensor_B = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(k, n)).astype(type_B))
tensor_C = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, n)).astype(type_C))
alpha = np.float16(1.)
beta = np.float16(0.)
tensor_D = np.zeros(tensor_C.shape).astype(type_D)<jupyter_output><empty_output><jupyter_text>Declaring and running a GEMM. To get started, one only needs to provide the tensors declared above to the `cutlass.op.Gemm` call. This sets up a default GEMM operation for the given device on which you are running. Assuming that we are running on SM80, this defaults to using a GEMM that leverages FP16 Tensor Core operations. Calling `plan.run()` will generate the CUTLASS C++ kernel in question, compile it, and run it on the tensors we previously passed in. By setting `print_module` to `true`, the C++ code that is emitted is printed.<jupyter_code># We specify `element_accumulator` here so as to match the kernel run by NumPy below. However,
# specifying `element_accumulator` is not required if it is the same as `element`
plan = cutlass.Gemm(element=dtype, layout=cutlass.LayoutType.RowMajor, element_accumulator=np.float32)
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, print_module=print_module)<jupyter_output><empty_output><jupyter_text>There are many other ways to construct a plan from `cutlass.op.Gemm` (e.g., by specifying the types and layouts of each operand, by providing representative tensors as inputs). For more details on these, see the documentation in the `cutlass.op.Gemm` constructor. We then compare the output to running the GEMM using NumPy.<jupyter_code>tensor_D_numpy = (alpha * (tensor_A @ tensor_B)) + (beta * tensor_C)
np.testing.assert_array_equal(tensor_D, tensor_D_numpy)<jupyter_output><empty_output><jupyter_text>Note that one could use the same kernel just declared for tensors provided by other frameworks beyond NumPy, such as PyTorch or CuPy. Changing operation modes. By default, the CUTLASS Python interface will try to use Tensor Core operations whenever possible. If the configuration provided to `cutlass.op.Gemm` is not supported on Tensor Cores, the interface will fall back to using a SIMT kernel. The operation mode currently in use can be returned via the `plan.opclass` property. In this case, it is Tensor Core operations.<jupyter_code>print(plan.opclass)<jupyter_output><empty_output><jupyter_text>Suppose that we don't want to use Tensor Cores for this GEMM. One can change to using CUTLASS's SIMT GEMMs by setting the plan's `opclass` field. As is shown in the printed output, the emitted kernel uses template parameters that fit CUTLASS's SIMT GEMMs. Also notice that, this time around, we provided tensor parameters to `plan.run()`. One is free to provide different parameters to `plan.run()` than were passed in at the initial call to `cutlass.op.Gemm`, provided that the passed-in tensors have the same data type and layout as those passed in on initialization.<jupyter_code>tensor_D_simt = np.zeros(tensor_C.shape).astype(type_D)
plan.opclass = cutlass.OpcodeClass.Simt
plan.run(tensor_A, tensor_B, tensor_C, tensor_D_simt, alpha, beta, print_module=print_module)<jupyter_output><empty_output><jupyter_text>If we compare the output of the Tensor Core and SIMT GEMMs we just ran, we see that they are equal.<jupyter_code>np.testing.assert_array_equal(tensor_D, tensor_D_simt)<jupyter_output><empty_output><jupyter_text>Running cached kernels. You may have noticed that the `plan.run()` calls for the previous two kernels took some time to execute. This is because the kernel being emitted had not yet been compiled. CUTLASS caches compiled binaries so that recompilation isn't necessary every time a kernel is run. For example, if we change modes back to using Tensor Cores and call `plan.run()` again (with a different set of tensor parameters), you'll find the call to return much faster.<jupyter_code>m = 2400
n = 3232
k = 4096
tensor_A = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, k)).astype(type_A))
tensor_B = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(k, n)).astype(type_B))
tensor_C = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, n)).astype(type_C))
tensor_D = np.zeros(tensor_C.shape).astype(type_D)
alpha = np.float16(1.)
beta = np.float16(2.)
plan.opclass = cutlass.OpcodeClass.TensorOp
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, print_module=print_module)<jupyter_output><empty_output><jupyter_text>Running non-default GEMMs. The previous examples showed how simple it is to get started running a default GEMM kernel in CUTLASS. But, what do you do if you want a bit more control over the parameters to the GEMM? Under the hood, CUTLASS enumerates the different GEMM configuration parameters possible for this kernel from the CUTLASS profiler. The code below shows how one can access the tile descriptions for the kernels (e.g., cluster, threadblock, and warp shape).<jupyter_code>tiles = plan.tile_descriptions()
print('{} tile descriptions returned'.format(len(tiles)))
num_print = 10
print('First {} tile descriptions are:'.format(num_print))
for td in tiles[:num_print]:
print(td)<jupyter_output><empty_output><jupyter_text>Next, we'll pick one of these configurations at random and compile and run it.<jupyter_code>tiles = [td for td in tiles if td.threadblock_shape[0] >= 128]
idx = random.randint(0, len(tiles)-1)
td = tiles[idx]
print('Tile description {} is: {}'.format(idx, td))
plan.compile(td)
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, print_module=print_module)<jupyter_output><empty_output><jupyter_text>One can also change the swizzling function used by the kernel. For example, one can modify the kernel to use the stream K feature of CUTLASS via:<jupyter_code># Stream K is exposed through the threadblock swizzle method for pre-SM90 kernels,
# and via the tile_scheduler attribute of the TileDescription for post-SM90 kernels
if plan.cc < 90:
plan.swizzling_functor = cutlass.swizzle.ThreadblockSwizzleStreamK
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, print_module=print_module)
else:
# Stream-K is currently only supported for warp-specialized cooperative kernels
td.kernel_schedule = cutlass.KernelScheduleType.TmaWarpSpecializedCooperative
td.epilogue_schedule = cutlass.EpilogueScheduleType.TmaWarpSpecializedCooperative
td.tile_scheduler = cutlass.TileSchedulerType.StreamK
plan.compile(td)
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, print_module=print_module)<jupyter_output><empty_output><jupyter_text>Handling errors. The CUTLASS Python interface attempts to catch runtime and compilation errors in Python so as to provide more understandable error messages. Here's an example in which we try to use too many stages for a given GEMM kernel. Normally, this would result in a runtime error due to the GPU having insufficient shared memory to launch the kernel with 8 stages. The CUTLASS Python interface is able to detect this issue before compiling the kernel, and reports it back to the user. Uncomment and run the code below to see this error.<jupyter_code># td = tiles[0]
# td.stages = 8
# plan.compile(td)<jupyter_output><empty_output><jupyter_text>Specializations for other data types. Various CUTLASS kernels specialized for specific data types can also be run via the Python interface. For example, the code below shows how to declare and run a GEMM using the 3xTF32 feature (see the corresponding C++ example [here](https://github.com/NVIDIA/cutlass/blob/main/examples/27_ampere_3xtf32_fast_accurate_tensorop_gemm/27_ampere_3xtf32_fast_accurate_tensorop_gemm.cu)).<jupyter_code>from cutlass.backend.utils.device import device_cc
# 3xTF32 requires SM80 or higher
if device_cc() >= 80:
plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor)
plan.math_operation = cutlass.MathOperation.multiply_add_fast_f32
# Create input/output tensors in FP32
A, B = [np.ones((128, 128)).astype(np.float32) for _ in range(2)]
C, D = [np.zeros((128, 128)).astype(np.float32) for _ in range(2)]
# Run the GEMM
plan.run(A, B, C, D, print_module=print_module)<jupyter_output><empty_output><jupyter_text>Additionally, one can run CUTLASS's FP8 GEMMs if using a frontend library capable of allocating and initializing FP8 tensors (e.g., PyTorch)<jupyter_code>try:
import torch
except ImportError:
print("PyTorch is not available. Skipping FP8 example")
import sys; sys.exit(0)
if not hasattr(torch, "float8_e4m3fn"):
print("Version of PyTorch does not have the float8_e4m3fn data type. Skipping FP8 example")
import sys; sys.exit(0)
# FP8 is supported through the CUTLASS Python interface on SM90 and higher
if device_cc() >= 90:
plan = cutlass.op.Gemm(element=torch.float8_e4m3fn, element_C=torch.float32, element_accumulator=torch.float32,
layout_A=cutlass.LayoutType.RowMajor, layout_B=cutlass.LayoutType.ColumnMajor,
layout_C=cutlass.LayoutType.ColumnMajor)
# Create input/output tensors in FP8
A, B = [torch.ones((128, 128)).to(torch.float8_e4m3fn).to("cuda") for _ in range(2)]
C, D = [torch.zeros((128, 128)).to(torch.float8_e4m3fn).to("cuda") for _ in range(2)]
# Run the GEMM
plan.run(A, B, C, D, print_module=print_module)<jupyter_output><empty_output> | examples/python/00_basic_gemm.ipynb/0 | {
"file_path": "examples/python/00_basic_gemm.ipynb",
"repo_id": "examples",
"token_count": 3596
} | 10 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/** Common algorithms on (hierarchical) tensors */
#pragma once
#include <cute/config.hpp>
#include <cute/tensor_impl.hpp>
namespace cute
{
//
// for_each
//
template <class Engine, class Layout, class UnaryOp>
CUTE_HOST_DEVICE constexpr
void
for_each(Tensor<Engine,Layout> const& tensor, UnaryOp&& op)
{
CUTE_UNROLL
for (int i = 0; i < size(tensor); ++i) {
op(tensor(i));
}
}
template <class Engine, class Layout, class UnaryOp>
CUTE_HOST_DEVICE constexpr
void
for_each(Tensor<Engine,Layout>& tensor, UnaryOp&& op)
{
CUTE_UNROLL
for (int i = 0; i < size(tensor); ++i) {
op(tensor(i));
}
}
// Accept mutable temporaries
template <class Engine, class Layout, class UnaryOp>
CUTE_HOST_DEVICE constexpr
void
for_each(Tensor<Engine,Layout>&& tensor, UnaryOp&& op)
{
return for_each(tensor, op);
}
//
// transform
//
// Similar to std::transform, but applies op in place and does not return a result
template <class Engine, class Layout, class UnaryOp>
CUTE_HOST_DEVICE constexpr
void
transform(Tensor<Engine,Layout>& tensor, UnaryOp&& op)
{
CUTE_UNROLL
for (int i = 0; i < size(tensor); ++i) {
tensor(i) = op(tensor(i));
}
}
// Accept mutable temporaries
template <class Engine, class Layout, class UnaryOp>
CUTE_HOST_DEVICE constexpr
void
transform(Tensor<Engine,Layout>&& tensor, UnaryOp&& op)
{
return transform(tensor, op);
}
// Similar to std::transform: transforms one tensor and assigns the result to another
template <class EngineIn, class LayoutIn,
class EngineOut, class LayoutOut,
class UnaryOp>
CUTE_HOST_DEVICE constexpr
void
transform(Tensor<EngineIn, LayoutIn > const& tensor_in,
Tensor<EngineOut,LayoutOut> & tensor_out,
UnaryOp&& op)
{
CUTE_UNROLL
for (int i = 0; i < size(tensor_in); ++i) {
tensor_out(i) = op(tensor_in(i));
}
}
// Accept mutable temporaries
template <class EngineIn, class LayoutIn,
class EngineOut, class LayoutOut,
class UnaryOp>
CUTE_HOST_DEVICE constexpr
void
transform(Tensor<EngineIn, LayoutIn > const& tensor_in,
Tensor<EngineOut,LayoutOut> && tensor_out,
UnaryOp&& op)
{
return transform(tensor_in, tensor_out, op);
}
// Similar to std::transform with a binary operation:
// takes two tensors as input and one tensor as output,
// applies op elementwise to tensor_in1 and tensor_in2,
// and assigns the result to tensor_out
template <class EngineIn1, class LayoutIn1,
class EngineIn2, class LayoutIn2,
class EngineOut, class LayoutOut,
class BinaryOp>
CUTE_HOST_DEVICE constexpr
void
transform(Tensor<EngineIn1,LayoutIn1> const& tensor_in1,
Tensor<EngineIn2,LayoutIn2> const& tensor_in2,
Tensor<EngineOut,LayoutOut> & tensor_out,
BinaryOp&& op)
{
CUTE_UNROLL
for (int i = 0; i < size(tensor_in1); ++i) {
tensor_out(i) = op(tensor_in1(i), tensor_in2(i));
}
}
// Accept mutable temporaries
template <class EngineIn1, class LayoutIn1,
class EngineIn2, class LayoutIn2,
class EngineOut, class LayoutOut,
class BinaryOp>
CUTE_HOST_DEVICE constexpr
void
transform(Tensor<EngineIn1,LayoutIn1> const& tensor_in1,
Tensor<EngineIn2,LayoutIn2> const& tensor_in2,
Tensor<EngineOut,LayoutOut> && tensor_out,
BinaryOp&& op)
{
return transform(tensor_in1, tensor_in2, tensor_out, op);
}
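// Illustrative usage sketch: the shapes, values, and make_tensor spelling below are
// assumptions for exposition rather than canonical usage.
//
//   auto t = make_tensor<float>(Int<4>{});               // owning 4-element tensor
//   for_each(t, [](float& x) { x = 1.0f; });             // fill with ones
//   transform(t, [](float x) { return 2.0f * x; });      // in-place: all twos
//   auto u = make_tensor<float>(Int<4>{});
//   transform(t, u, [](float x) { return x + 1.0f; });   // u(i) = t(i) + 1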
} // end namespace cute
| include/cute/algorithm/tensor_algorithms.hpp/0 | {
"file_path": "include/cute/algorithm/tensor_algorithms.hpp",
"repo_id": "include",
"token_count": 1842
} | 11 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/arch/mma.hpp>
// Config
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) && defined(__CUDA_ARCH_FEAT_SM90_ALL))
# define CUTE_ARCH_MMA_SM90A_ENABLED
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cute {
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA Descriptor and utilities
// GMMA enums and utilities
namespace GMMA
{
enum class LayoutType : uint8_t {
INTERLEAVE = 0,
B128 = 1,
B64 = 2,
B32 = 3,
};
CUTE_HOST_DEVICE char const* to_string(LayoutType const& t) {
switch (t) {
case LayoutType::INTERLEAVE: return "INTERLEAVE";
case LayoutType::B128: return "B128";
case LayoutType::B64: return "B64";
case LayoutType::B32: return "B32";
}
return nullptr;
}
#if !defined(__CUDACC_RTC__)
// Output operator for all enums in this namespace
CUTE_HOST std::ostream& operator<<(std::ostream& os, LayoutType const& t) {
char const* s = to_string(t);
if (s) {
std::operator<<(os, s); // Explicit call to avoid ambiguity
} else {
os.setstate(std::ios_base::failbit);
}
return os;
}
#endif // !defined(__CUDACC_RTC__)
} // end namespace GMMA
union GmmaDescriptor
{
CUTE_HOST_DEVICE constexpr
GmmaDescriptor() noexcept : desc_(0) {}
CUTE_HOST_DEVICE constexpr
GmmaDescriptor(uint64_t desc) noexcept : desc_(desc) {}
CUTE_HOST_DEVICE constexpr
GmmaDescriptor(GmmaDescriptor const& t) noexcept : desc_(t.desc_) {}
CUTE_HOST_DEVICE constexpr
GmmaDescriptor(GmmaDescriptor && t) noexcept : desc_(t.desc_) {}
CUTE_HOST_DEVICE constexpr
GmmaDescriptor& operator=(GmmaDescriptor const& t) noexcept {
desc_ = t.desc_;
return *this;
}
CUTE_HOST_DEVICE constexpr
GmmaDescriptor& operator=(GmmaDescriptor && t) noexcept {
desc_ = t.desc_;
return *this;
}
uint64_t desc_;
uint32_t reg32_[2];
uint16_t reg16_[4];
// Bitfield implementation avoids the need for shifts in assignment
struct {
// start_address, bit [0,14), 4LSB not included
uint16_t start_address_ : 14, : 2; // 14 bits [0,14), 2 bits unused
// leading dimension byte offset, bit [16,30), 4LSB not included
// For N: This is the stride from the first col to the second col of the 8x2 brick in INTERLEAVED
// Unused for all SWIZZLE_* layouts (and assumed to be 1)
// For T: This is the stride from the first 8 rows to the next 8 rows.
uint16_t leading_byte_offset_ : 14, : 2; // 14 bits [0,14), 2 bits unused
// stride dimension byte offset, bit [32,46), 4LSB not included
// For N: This is the stride from the first 8 rows to the next 8 rows.
    // For T: This is the stride from the first 8 cols to the next 8 cols.
uint16_t stride_byte_offset_ : 14, : 2; // 14 bits [0,14), 2 bits unused
// base_offset, bit [49,52)
// Valid only for SWIZZLE_128B and SWIZZLE_64B
uint8_t : 1, base_offset_ : 3, : 4; // 1 bit unused, 3 bits [1,4), 4 bits unused
// layout type, bit [62,64)
// SWIZZLE_NONE = 0, SWIZZLE_32B = 3, SWIZZLE_64B = 2, SWIZZLE_128B = 1
uint8_t : 6, layout_type_ : 2; // 6 bits unused, 2 bits [6,8)
} bitfield;
// Decay to a uint64_t
CUTE_HOST_DEVICE constexpr
operator uint64_t() const noexcept { return desc_; }
};
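// Illustrative sketch of the bitfield view; the field values below are arbitrary
// placeholders for exposition -- real descriptors are normally derived from a
// shared-memory layout elsewhere in CuTe rather than filled in by hand.
//
//   GmmaDescriptor desc;
//   desc.bitfield.layout_type_ = uint8_t(GMMA::LayoutType::B128); // SWIZZLE_128B
//   desc.bitfield.start_address_ = 0x0040;      // smem address with 4 LSB dropped
//   desc.bitfield.stride_byte_offset_ = 8;      // stride in 16-byte units
//   print(desc);                                // dumps each field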
// Printer
CUTE_HOST_DEVICE void
print(GmmaDescriptor const& t)
{
#if !defined(__CUDACC_RTC__)
printf("GmmaDescriptor: 0x%016llx\n", static_cast<unsigned long long>(t.desc_));
printf(" start_addr : 0x%04x\n", t.bitfield.start_address_);
printf(" leading_off: 0x%04x (%d)\n", t.bitfield.leading_byte_offset_, t.bitfield.leading_byte_offset_);
printf(" stride_off : 0x%04x (%d)\n", t.bitfield.stride_byte_offset_, t.bitfield.stride_byte_offset_);
printf(" base_offset: 0x%01x\n", t.bitfield.base_offset_);
printf(" layout_type: 0x%01x (%s)\n", t.bitfield.layout_type_, to_string(static_cast<GMMA::LayoutType>(t.bitfield.layout_type_)));
#endif // !defined(__CUDACC_RTC__)
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cute
////////////////////////////////////////////////////////////////////////////////////////////////////
| include/cute/arch/mma_sm90_desc.hpp/0 | {
"file_path": "include/cute/arch/mma_sm90_desc.hpp",
"repo_id": "include",
"token_count": 2085
} | 12 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/layout.hpp>
/* This implements a ComposedLayout of the form
* LayoutA o Offset o LayoutB
* and is useful in cases where composition() does not or cannot apply to LayoutA and LayoutB.
* For example, when the "divisibility condition" in shape_div is violated in composition(LayoutA, LayoutB).
*
* This ComposedLayout provides similar functionality to Layout including tiling, partitioning,
* coordinate-to-index mapping and layout manipulations, but is not considered a "normal" layout.
* For example, this layout provides shape() and size() functions, but does not provide stride() functions.
 * Mostly, this similar functionality is accomplished by applying each operation to LayoutB only,
 * as LayoutB defines the domain.
*/
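/* Illustrative sketch: the Swizzle spelling below (declared in <cute/swizzle.hpp>, not
 * included here) and the chosen shapes are assumptions for exposition only.
 *
 *   auto layoutB = cute::make_layout(cute::make_shape(cute::Int<8>{}, cute::Int<8>{}));
 *   auto composed = cute::composition(cute::Swizzle<2,0,3>{}, cute::Int<0>{}, layoutB);
 *   auto idx = composed(cute::make_coord(1, 2));   // LayoutA(Offset + LayoutB(coord))
 */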
namespace cute
{
// A Layout of non-trivially composable functions: F o I o L
template <class LayoutA, class Offset, class LayoutB>
struct ComposedLayout : private cute::tuple<LayoutA, Offset, LayoutB> // EBO for static layouts
{
CUTE_HOST_DEVICE constexpr
ComposedLayout(LayoutA const& layoutA = {},
Offset const& offset = {},
LayoutB const& layoutB = {})
: cute::tuple<LayoutA, Offset, LayoutB>(layoutA, offset, layoutB)
{}
//
// Accessors
//
static constexpr int rank = LayoutB::rank;
CUTE_HOST_DEVICE constexpr
decltype(auto)
layout_a() const {
return get<0>(static_cast<cute::tuple<LayoutA, Offset, LayoutB> const&>(*this));
}
CUTE_HOST_DEVICE constexpr
decltype(auto)
offset() const {
return get<1>(static_cast<cute::tuple<LayoutA, Offset, LayoutB> const&>(*this));
}
CUTE_HOST_DEVICE constexpr
decltype(auto)
layout_b() const {
return get<2>(static_cast<cute::tuple<LayoutA, Offset, LayoutB> const&>(*this));
}
CUTE_HOST_DEVICE constexpr
decltype(auto)
layout() const {
return *this;
}
CUTE_HOST_DEVICE constexpr
decltype(auto)
shape() const {
return layout_b().shape();
}
// Doesn't really make sense to ask for the strides of this "layout"
CUTE_HOST_DEVICE constexpr
decltype(auto)
stride() const = delete;
//
// Mappings
//
// Map a logical coordinate to a linear index (Coord has no Underscore slice operators)
// OR
// Slice the layout and return the sublayout (Coord has an Underscore slice op)
template <class Coord>
CUTE_HOST_DEVICE constexpr
auto
operator()(Coord const& coord) const {
if constexpr (has_underscore<Coord>::value) {
return slice(coord, *this);
} else {
return layout_a()(offset() + layout_b()(coord)); // (A o O o B)(c)
}
CUTE_GCC_UNREACHABLE;
}
// Convenience function for multi-dimensional coordinates
template <class Coord0, class Coord1, class... Coords>
CUTE_HOST_DEVICE constexpr
auto
operator()(Coord0 const& c0, Coord1 const& c1, Coords const&... cs) const {
return operator()(make_coord(c0,c1,cs...));
}
//
// Compose
//
template <class OtherLayout>
CUTE_HOST_DEVICE constexpr
auto
compose(OtherLayout const& other) const {
return composition(*this, other);
}
template <class... Layouts>
CUTE_HOST_DEVICE constexpr
auto
compose(Layouts const&... layouts) const {
return composition(*this, make_tile(layouts...));
}
template <class OtherShape>
CUTE_HOST_DEVICE constexpr
auto
with_shape(OtherShape const& shape) const {
return composition(*this, make_layout(shape));
}
template <class... Shapes>
CUTE_HOST_DEVICE constexpr
auto
with_shape(Shapes const&... shapes) const {
return composition(*this, make_layout(make_shape(shapes...)));
}
//
// Tile
//
template <class OtherLayout>
CUTE_HOST_DEVICE constexpr
auto
tile(OtherLayout const& other) const {
return tiled_divide(*this, other);
}
template <class... Layouts>
CUTE_HOST_DEVICE constexpr
auto
tile(Layouts const&... layouts) const {
return tiled_divide(*this, make_tile(layouts...));
}
// Equality, return a static or dynamic boolean
template <class... Args>
CUTE_HOST_DEVICE constexpr
auto
operator==(ComposedLayout<Args...> const& other) const {
return this->layout_a() == other.layout_a() &&
this->layout_b() == other.layout_b() &&
this->offset() == other.offset();
}
};
template <class A, class O, class B>
struct is_layout<ComposedLayout<A,O,B>> : true_type {};
template <class T>
struct is_composed_layout : false_type {};
template <class A, class O, class B>
struct is_composed_layout<ComposedLayout<A,O,B>> : true_type {};
//
// Constructors
//
template <class LayoutA, class Offset, class LayoutB>
CUTE_HOST_DEVICE constexpr
auto
make_composed_layout(LayoutA const& layoutA,
Offset const& offset,
LayoutB const& layoutB)
{
return ComposedLayout<LayoutA, Offset, LayoutB>{layoutA, offset, layoutB};
}
//
// Utilities
//
// Return the layout of a mode
template <int... Is, class A, class O, class B>
CUTE_HOST_DEVICE constexpr
decltype(auto)
layout(ComposedLayout<A,O,B> const& clayout)
{
return composition(clayout.layout_a(), clayout.offset(), layout<Is...>(clayout.layout_b()));
}
// Return the shape of a mode
template <int... Is, class A, class O, class B>
CUTE_HOST_DEVICE constexpr
decltype(auto)
shape(ComposedLayout<A,O,B> const& layout)
{
return shape<Is...>(layout.layout_b());
}
// Doesn't make sense to directly ask for the strides of this "layout"
template <int... Is, class Fn, class O, class Layout>
CUTE_HOST_DEVICE constexpr
decltype(auto)
stride(ComposedLayout<Fn,O,Layout> const& layout) = delete;
// Return the number of elements in a mode
template <int... Is, class A, class O, class B>
CUTE_HOST_DEVICE constexpr
decltype(auto)
size(ComposedLayout<A,O,B> const& layout)
{
return size<Is...>(layout.layout_b());
}
// Return the number of modes
template <int... Is, class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
rank(ComposedLayout<A,O,B> const& layout)
{
return rank<Is...>(layout.layout_b());
}
// Return the depth of the layout
template <int... Is, class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
depth(ComposedLayout<A,O,B> const& layout)
{
return depth<Is...>(layout.layout_b());
}
// Return the codomain size of a mode
template <int... Is, class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
cosize(ComposedLayout<A,O,B> const& layout)
{
return cosize<Is...>(layout.layout_b());
}
//
// Operations to manipulate Layouts like a tuple of pairs
//
template <size_t I, class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
get(ComposedLayout<A,O,B> const& a)
{
return composition(a.layout_a(), a.offset(), get<I>(a.layout_b()));
}
template <int Begin, int End, class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
take(ComposedLayout<A,O,B> const& a)
{
return composition(a.layout_a(), a.offset(), take<Begin,End>(a.layout_b()));
}
template <class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
flatten(ComposedLayout<A,O,B> const& a)
{
return composition(a.layout_a(), a.offset(), flatten(a.layout_b()));
}
template <int N, class A, class O, class B, class X>
CUTE_HOST_DEVICE constexpr
auto
append(ComposedLayout<A,O,B> const& a, X const& x)
{
return composition(a.layout_a(), a.offset(), append<N>(a.layout_b(), x));
}
template <int Begin, int End, class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
group(ComposedLayout<A,O,B> const& a)
{
return composition(a.layout_a(), a.offset(), group<Begin,End>(a.layout_b()));
}
//
// Slice a ComposedLayout
//
template <class Coord, class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
slice_and_offset(Coord const& coord, ComposedLayout<A,O,B> const& layout)
{
auto [slice, offset] = slice_and_offset(coord, layout.layout_b());
return cute::make_tuple(ComposedLayout{layout.layout_a(), layout.offset() + offset, slice}, Int<0>{});
}
template <class Coord, class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
slice(Coord const& coord, ComposedLayout<A,O,B> const& layout)
{
return get<0>(slice_and_offset(coord, layout));
}
// Compute a pointer offset and (potentially modified) layout from a coordinate
// For composed layout tensors the offset is accumulated in the layout itself while pointer is not updated
template <class Coord, class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
domain_offset(Coord const& coord, ComposedLayout<A,O,B> const& layout)
{
return cute::make_tuple(ComposedLayout{layout.layout_a(), layout.offset() + layout.layout_b()(coord), layout.layout_b()}, Int<0>{});
}
//
// composition
//
template <class LayoutA,
class Offset,
class LayoutB>
CUTE_HOST_DEVICE constexpr
auto
composition(LayoutA const& layoutA,
Offset const& offset,
LayoutB const& layoutB)
{
return ComposedLayout<LayoutA, Offset, LayoutB>{layoutA, offset, layoutB};
}
template <class A, class O, class B, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
composition(ComposedLayout<A,O,B> const& a,
Tiler const& b)
{
return composition(a.layout_a(), a.offset(), composition(a.layout_b(), b));
}
template <class ShapeA, class StrideA,
class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
composition(Layout<ShapeA,StrideA> const& a,
ComposedLayout<A,O,B> const& b)
{
CUTE_STATIC_ASSERT_V(b.offset() == Int<0>{}, "Require offset == 0.");
return composition(composition(a, b.layout_a()), b.layout_b());
}
//
// complement
//
template <class A, class O, class B, class CoTarget>
CUTE_HOST_DEVICE constexpr
auto
complement(ComposedLayout<A,O,B> const& layout, CoTarget const& cotarget)
{
return complement(layout.layout_b(), cotarget);
}
template <class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
complement(ComposedLayout<A,O,B> const& layout)
{
return complement(layout, cosize(layout));
}
//
// inverse
//
template <class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
right_inverse(ComposedLayout<A,O,B> const& layout)
{
return composition(right_inverse(layout.layout_b()), right_inverse(layout.offset()), right_inverse(layout.layout_a()));
}
template <class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
left_inverse(ComposedLayout<A,O,B> const& layout)
{
return composition(left_inverse(layout.layout_b()), left_inverse(layout.offset()), left_inverse(layout.layout_a()));
}
//
// Other operations
//
template <class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
zip(ComposedLayout<A,O,B> const& a)
{
return composition(a.layout_a(), a.offset(), zip(a.layout_b()));
}
// Partitions
template <class A, class O, class B, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
logical_divide(ComposedLayout<A,O,B> const& a,
Tiler const& b)
{
return composition(a.layout_a(), a.offset(), logical_divide(a.layout_b(), b));
}
template <class A, class O, class B, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
tile_unzip(ComposedLayout<A,O,B> const& a,
Tiler const& b)
{
return composition(a.layout_a(), a.offset(), tile_unzip(a.layout_b(), b));
}
template <class A, class O, class B, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
tiled_divide(ComposedLayout<A,O,B> const& a,
Tiler const& b)
{
return composition(a.layout_a(), a.offset(), tiled_divide(a.layout_b(), b));
}
template <class A, class O, class B, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
zipped_divide(ComposedLayout<A,O,B> const& a,
Tiler const& b)
{
return composition(a.layout_a(), a.offset(), zipped_divide(a.layout_b(), b));
}
template <class A, class O, class B, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
flat_divide(ComposedLayout<A,O,B> const& a,
Tiler const& b)
{
return composition(a.layout_a(), a.offset(), flat_divide(a.layout_b(), b));
}
template <class A, class O, class B, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
logical_product(ComposedLayout<A,O,B> const& a,
Tiler const& b)
{
return composition(a.layout_a(), a.offset(), logical_product(a.layout_b(), b));
}
template <class A, class O, class B, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
zipped_product(ComposedLayout<A,O,B> const& a,
Tiler const& b)
{
return composition(a.layout_a(), a.offset(), zipped_product(a.layout_b(), b));
}
template <class A, class O, class B, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
tiled_product(ComposedLayout<A,O,B> const& a,
Tiler const& b)
{
return composition(a.layout_a(), a.offset(), tiled_product(a.layout_b(), b));
}
template <class A, class O, class B, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
flat_product(ComposedLayout<A,O,B> const& a,
Tiler const& b)
{
return composition(a.layout_a(), a.offset(), flat_product(a.layout_b(), b));
}
template <class A, class O, class B, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
blocked_product(ComposedLayout<A,O,B> const& a,
Tiler const& b)
{
return composition(a.layout_a(), a.offset(), blocked_product(a.layout_b(), b));
}
template <class A, class O, class B, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
raked_product(ComposedLayout<A,O,B> const& a,
Tiler const& b)
{
return composition(a.layout_a(), a.offset(), raked_product(a.layout_b(), b));
}
template <class A, class O, class B,
class Shape, class ModeOrder = GenColMajor>
CUTE_HOST_DEVICE constexpr
auto
tile_to_shape(ComposedLayout<A,O,B> const& layout,
Shape const& trg_shape,
ModeOrder const& ord_shape = {})
{
return composition(layout.layout_a(), layout.offset(), tile_to_shape(layout.layout_b(), trg_shape, ord_shape));
}
template <class A, class O, class B,
class Shape>
CUTE_HOST_DEVICE constexpr
auto
filter(ComposedLayout<A,O,B> const& layout, Shape const& trg_profile)
{
return composition(layout.layout_a(), layout.offset(), filter(layout.layout_b(), trg_profile));
}
template <class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
coalesce(ComposedLayout<A,O,B> const& layout)
{
return composition(layout.layout_a(), layout.offset(), coalesce(layout.layout_b()));
}
template <class A, class O, class B, class Shape>
CUTE_HOST_DEVICE constexpr
auto
coalesce(ComposedLayout<A,O,B> const& layout, Shape const& trg_profile)
{
return composition(layout.layout_a(), layout.offset(), coalesce(layout.layout_b(), trg_profile));
}
//
// Upcast and Downcast
//
template <int N, class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
upcast(ComposedLayout<A,O,B> const& layout)
{
return composition(upcast<N>(layout.layout_a()), upcast<N>(layout.offset()), upcast<N>(layout.layout_b()));
}
template <int N, class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
downcast(ComposedLayout<A,O,B> const& layout)
{
return composition(downcast<N>(layout.layout_a()), downcast<N>(layout.offset()), downcast<N>(layout.layout_b()));
}
template <class OldType, class NewType,
class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
recast_layout(ComposedLayout<A,O,B> const& layout)
{
using scale = decltype(trait_ratio(sizeof_bits<NewType>{}, sizeof_bits<OldType>{}));
if constexpr (scale::num == 1 && scale::den == 1) {
return layout;
}
else if constexpr (scale::num == 1) {
return downcast<scale::den>(layout);
}
else if constexpr (scale::den == 1) {
return upcast<scale::num>(layout);
}
else {
static_assert(dependent_false<scale>, "Recast not supported.");
}
CUTE_GCC_UNREACHABLE;
}
template <class A, class O, class B>
CUTE_HOST_DEVICE constexpr
auto
max_alignment(ComposedLayout<A,O,B> const& layout)
{
// Do not attempt for general ComposedLayouts
//return gcd(max_alignment(layout.layout_a()), max_alignment(layout.offset()), max_alignment(layout.layout_b()));
return Int<1>{};
}
//
// Display utilities
//
template <class A, class O, class B>
CUTE_HOST_DEVICE void print(ComposedLayout<A,O,B> const& layout)
{
print(layout.layout_a()); print(" o "); print(layout.offset()); print(" o "); print(layout.layout_b());
}
#if !defined(__CUDACC_RTC__)
template <class A, class O, class B>
CUTE_HOST std::ostream& operator<<(std::ostream& os, ComposedLayout<A,O,B> const& layout)
{
return os << layout.layout_a() << " o " << layout.offset() << " o " << layout.layout_b();
}
#endif
} // end namespace cute
| include/cute/layout_composed.hpp/0 | {
"file_path": "include/cute/layout_composed.hpp",
"repo_id": "include",
"token_count": 6777
} | 13 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/layout.hpp>
#include <cute/layout_composed.hpp>
#include <cute/swizzle.hpp>
#include <cute/pointer_swizzle.hpp> // get_swizzle
/* Specialized functionality for a ComposedLayout of the form
* InvolutionFn o Offset o LayoutB
* where the InvolutionFn is a Swizzle<B,M,S> and is not linear (hence the need for the Offset).
*
* Because these are specializations for core functions of ComposedLayout, these Swizzle Layouts
* provide similar functionality to Layout including tiling, partitioning,
* coordinate-to-index mapping and layout manipulations, but are not considered "normal" layouts.
* For example, these provide shape() and size() functions, but do not provide stride() functions.
*
* Furthermore, each of these specializations uses Swizzle<>-specific knowledge in its implementation and
* attempts to decay itself to a normal-layout with dynamic or static strides when certain slicing conditions
* are met. This is possible by determining the subdomain of the Swizzle<> function that is identity and
 * testing if LayoutB's codomain is contained within it. In general, MixedBits is used as the Offset to track
* statically-vs-dynamically known bits in the Offset to improve the decay to static or dynamic normal layouts.
*/
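/* Illustrative sketch: composing a swizzle with a shared-memory tile layout. The
 * particular Swizzle<3,4,3> / 8x64 row-major choice is an assumption for exposition.
 *
 *   auto smem_layout = cute::composition(
 *       cute::Swizzle<3,4,3>{},
 *       cute::make_layout(cute::make_shape(cute::Int<8>{}, cute::Int<64>{}),
 *                         cute::make_stride(cute::Int<64>{}, cute::Int<1>{})));
 *   // smem_layout tiles and slices like a Layout, but maps coordinates through the swizzle:
 *   auto idx = smem_layout(cute::make_coord(2, 5));
 */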
namespace cute
{
template <int B, int M, int S, class Offset, class LayoutB>
struct get_swizzle<ComposedLayout<Swizzle<B,M,S>,Offset,LayoutB>> { using type = Swizzle<B,M,S>; };
//
// Constructors
//
template <int B, int M, int S>
CUTE_HOST_DEVICE constexpr
auto
make_layout(Swizzle<B,M,S> const& sxor)
{
return composition(sxor, Layout<Int<M+B+abs(S)>,Int<1>>{});
}
namespace detail {
template <int B, int M, int S, class OldShape, class OldStride, class NewShape, class NewStride>
CUTE_HOST_DEVICE constexpr
auto
transfer_swizzle(Layout<OldShape,OldStride> const& old_layout,
Layout<NewShape,NewStride> const& new_layout)
{
// Our goal is to determine a new swizzle for the strides in new_layout for consistent vectorizations
// This is accomplished by identifying
// S o L :=: S? o L*
// We identify the "active" portion of S by computing (P o L)(c*) where P is a projection generated by S
// Then that active identifier is transformed through the layouts:
// L*(L[(P o L)(c*)])
// which is a new swizzle identifier for S?, the new swizzle
// Projections of the swizzle layout for composition, P
auto swizzle_only_zy = make_layout(make_shape (Int<(1 << M)>{}, Int<(1 << B)>{}, Int<(1 << (abs(S)-B))>{}, Int<(1 << B )>{}, Int<1>{}),
make_stride( Int<0>{}, Int<(1 << M)>{}, Int<0>{}, Int<(1 << (M+abs(S)))>{}, Int<0>{}));
// Compose with the tile to get the swizzle projection, P o L [The Z and Y contributing portions of L]
auto layout_only_zy = composition(swizzle_only_zy, old_layout);
// Transform the end coordinate to get the active bits of the swizzle, (P o L)(c*)
auto swizzle_active_bits = layout_only_zy(size(layout_only_zy)-Int<1>{});
// Get the Z bit and the Y bits -- keep only those that are active in Z *and* Y
auto zzz_msk = typename Swizzle<B,M,S>::zzz_msk{};
auto yyy_msk = typename Swizzle<B,M,S>::yyy_msk{};
auto msk_sft = typename Swizzle<B,M,S>::msk_sft{};
auto active_Z = swizzle_active_bits & shiftr(swizzle_active_bits, msk_sft) & zzz_msk;
auto active_Y = swizzle_active_bits & shiftr(swizzle_active_bits, -msk_sft) & yyy_msk;
// Pass the identifiers through the old layout and new layout to make a new swizzle identifier, L*(L[(P o L)(c*)])
auto new_active_Z = new_layout(old_layout.get_1d_coord(active_Z));
auto new_active_Y = new_layout(old_layout.get_1d_coord(active_Y));
// Use this new swizzle identifier to construct the new swizzle for new_layout
// (this also makes sure it's a "valid" swizzle that Swizzle can represent)
return composition(make_swizzle<new_active_Y,new_active_Z>(), new_layout);
}
} // end namespace detail
template <int B, int M, int S, class Offset, class Layout>
CUTE_HOST_DEVICE constexpr
auto
make_fragment_like(ComposedLayout<Swizzle<B,M,S>,Offset,Layout> const& layout)
{
return make_fragment_like(layout.layout_b());
}
//
// Utilities
//
namespace detail {
// Get just the Swizzle part of a composed layout.
template <int B, int M, int S, class Offset, class LayoutB>
CUTE_HOST_DEVICE constexpr
auto
get_swizzle_portion(ComposedLayout<Swizzle<B,M,S>,Offset,LayoutB>)
{
return Swizzle<B,M,S>{};
}
// A non-swizzled layout's "Swizzle part" is the identity swizzle.
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
get_swizzle_portion(Layout<Shape,Stride>)
{
return Swizzle<0,4,3>{};
}
// Get the "non-swizzle" part of a composed layout,
// which is the underlying (non-composed) Layout.
template <int B, int M, int S, class Offset, class LayoutB>
CUTE_HOST_DEVICE constexpr
auto
get_nonswizzle_portion(ComposedLayout<Swizzle<B,M,S>,Offset,LayoutB> const& slayout)
{
return slayout.layout_b();
}
// The non-swizzle part of a non-swizzled layout is just the Layout.
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
get_nonswizzle_portion(Layout<Shape,Stride> const& slayout)
{
return slayout;
}
} // namespace detail
//
// Slice a Swizzled ComposedLayout
//
namespace detail {
template <class IntZ, class IntY, class Offset, int... I>
CUTE_HOST_DEVICE constexpr
auto
make_swizzle_strides(true_type,
IntZ const& Z,
IntY const& Y,
Offset const& offset,
int_sequence<I...>)
{
// Below is an optimized/compressed version of:
//return cute::make_tuple((swizzle(offset + Z*Int<(1 << I)>{}) - swizzle(offset))...);
// with knowledge of Swizzle, I... ranges for each B bits,
// and the layout won't slice along z-bits that are already set
// y\z 0 1
// 0 Z DC
// 1 -Z DC
return cute::make_tuple(conditional_return((offset & (Y << Int<I>{})) == Int<0>{}, Z << Int<I>{}, -(Z << Int<I>{}))...);
}
template <class IntZ, class IntY, class Offset, int... I>
CUTE_HOST_DEVICE constexpr
auto
make_swizzle_strides(false_type,
IntZ const& Z,
IntY const& Y,
Offset const& offset,
int_sequence<I...>)
{
// Below is an optimized/compressed version of:
//return cute::make_tuple((swizzle(offset + Y*Int<(1 << I)>{}) - swizzle(offset))...);
// with knowledge of Swizzle, I... ranges for each B bits,
// and the layout won't slice along y-bits that are already set
// y\z 0 1
// 0 Y+Z Y-Z
// 1 DC DC
return cute::make_tuple(conditional_return((offset & (Z << Int<I>{})) == Int<0>{}, (Y+Z) << Int<I>{}, (Y-Z) << Int<I>{})...);
}
} // end namespace detail
template <class Coord, int B, int M, int S, class Offset, class Layout>
CUTE_HOST_DEVICE constexpr
auto
slice_and_offset(Coord const& coord, ComposedLayout<Swizzle<B,M,S>,Offset,Layout> const& layout)
{
if constexpr (all_underscore<Coord>::value) {
// Skip the expensive/complicated attempt to decay to a normal layout and just reshape
return cute::make_tuple(composition(layout.layout_a(), layout.offset(), slice(coord, layout.layout_b())), Int<0>{});
} else {
// Projections of the swizzle layout for composition
auto sw = make_layout(make_shape(Int<(1 << M)>{}, Int<(1 << B)>{}, Int<(1 << (abs(S)-B))>{}, Int<(1 << B)>{}, Int<1>{}));
auto swizzle_anti_zy = make_layout(shape(sw),
make_stride(stride<0>(sw), Int<0>{}, stride<2>(sw), Int<0>{}, size(sw)));
auto swizzle_only_zy = make_layout(shape(sw),
make_stride( Int<0>{}, stride<1>(sw), Int<0>{}, stride<3>(sw), Int<0>{}));
// The portion of the layout that is not yet consumed
auto sliced_layout = slice(coord, layout.layout_b());
// If the sliced_layout hits two bits that are swizzled together, then don't attempt to decay
// Compose with the layout to get the swizzle projection, P o L [The Z and Y contributing portions of L]
// (this also tests that shape/stride of layout compose with swizzle)
auto sliced_layout_only_zy = composition(swizzle_only_zy, sliced_layout);
// Transform the end coordinate to get the active bits of the swizzle, (P o L)(c*)
auto swizzle_active_bits = sliced_layout_only_zy(size(sliced_layout_only_zy)-Int<1>{});
// Determine if any active bits collide under the swizzle
auto hit_ZandY = !(swizzle_active_bits & ~layout.layout_a()(swizzle_active_bits));
// The portion of the layout that we are consuming now
auto diced_layout = dice(coord, layout.layout_b());
auto diced_coord = dice(coord, coord);
auto diced_layout_anti_zy = composition(swizzle_anti_zy, diced_layout);
auto diced_layout_only_zy = composition(swizzle_only_zy, diced_layout);
// New swizzle and offset
auto swizzle = layout.layout_a();
// offset_only_zy interacts with swizzle and gets accumulated with layout.offset()
// being careful about the static/dynamic contributions from diced_layout and diced_coord
auto offset_only_zy = layout.offset() ^ to_mixed_bits(diced_layout_only_zy, diced_coord);
// offset_anti_zy always gets passed through, no interaction with swizzle
auto offset_anti_zy = diced_layout_anti_zy(diced_coord);
// If Layout's codomain hits on Y AND Z, then it's not reducible
// If Layout's codomain hits on Y XOR Z, then it's dynamic-normal
// If Layout's codomain hits on neither Y NOR Z, then it's static-normal
// Test the sliced layout for hit_X & hit_Y for potential decay
if constexpr (is_constant<false, decltype(hit_ZandY)>::value)
{ // Hits on Y AND Z, so it's not reducible
return cute::make_tuple(composition(swizzle, offset_only_zy, sliced_layout), offset_anti_zy);
} else
{ // Misses on Y or Z, so it's static-normal or dynamic-normal
// Lowest bit of the Z and Y masks
auto Z = typename Swizzle<B,M,S>::zzz_msk{} & -typename Swizzle<B,M,S>::zzz_msk{};
auto Y = typename Swizzle<B,M,S>::yyy_msk{} & -typename Swizzle<B,M,S>::yyy_msk{};
auto stride_lo = detail::make_swizzle_strides(Z < Y, Z, Y, offset_only_zy, make_int_sequence<B>{});
auto stride_hi = detail::make_swizzle_strides(Z > Y, Z, Y, offset_only_zy, make_int_sequence<B>{});
// Construct a (dynamic) layout that we can perform the composition with
auto swizzle_layout = make_layout(make_shape (Int<(1 << M)>{}, repeat<B>(Int<2>{}), Int<(1 << (abs(S)-B))>{}, repeat<B>(Int<2>{}), Int< 1>{}),
make_stride(Int< 1>{}, stride_lo, Int<(1 << (M+B))>{}, stride_hi , Int<(1 << (M+B+abs(S)))>{}));
// Decay to a normal layout with offset
return cute::make_tuple(composition(swizzle_layout, sliced_layout),
swizzle(offset_only_zy) + offset_anti_zy);
}
}
CUTE_GCC_UNREACHABLE;
}
//
// composition
//
// Ignore identity case
template <int M, int S,
class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
composition(Swizzle<0,M,S> const&,
Int<0> const&,
Layout<Shape,Stride> const& layout)
{
return layout;
}
template <int B, int M, int S,
class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
composition(Swizzle<B,M,S> const& sxor,
Layout<Shape,Stride> const& layout)
{
return composition(sxor, Int<0>{}, layout);
}
template <class ShapeA, class StrideA,
int B, int M, int S>
CUTE_HOST_DEVICE constexpr
auto
composition(Layout<ShapeA,StrideA> const& a,
Swizzle<B,M,S> const& b)
{
// Get the Z bits and the Y bits
auto active_Y = a(typename Swizzle<B,M,S>::yyy_msk{});
auto active_Z = a(typename Swizzle<B,M,S>::zzz_msk{});
// Works in simple cases... but could be greatly generalized
return composition(make_swizzle<active_Y,active_Z>(), a);
}
//
// inverse
//
// Specialization to attempt to pass-through the Swizzle back to the left -- Needed?
template <int B, int M, int S, class Offset, class Layout>
CUTE_HOST_DEVICE constexpr
auto
right_inverse(ComposedLayout<Swizzle<B,M,S>,Offset,Layout> const& layout)
{
if constexpr (is_constant<0, Offset>::value) {
return composition(right_inverse(layout.layout_b()), layout.layout_a());
} else {
return composition(right_inverse(layout.layout_b()), right_inverse(layout.offset()), right_inverse(layout.layout_a()));
}
}
// Specialization to attempt to pass-through the Swizzle back to the left -- Needed?
template <int B, int M, int S, class Offset, class Layout>
CUTE_HOST_DEVICE constexpr
auto
left_inverse(ComposedLayout<Swizzle<B,M,S>,Offset,Layout> const& layout)
{
if constexpr (is_constant<0, Offset>::value) {
return composition(left_inverse(layout.layout_b()), layout.layout_a());
} else {
return composition(left_inverse(layout.layout_b()), left_inverse(layout.offset()), left_inverse(layout.layout_a()));
}
}
template <int B, int M, int S>
CUTE_HOST_DEVICE constexpr
Swizzle<B,M,S>
right_inverse(Swizzle<B,M,S> const& sw)
{
return sw;
}
template <int B, int M, int S>
CUTE_HOST_DEVICE constexpr
Swizzle<B,M,S>
left_inverse(Swizzle<B,M,S> const& sw)
{
return sw;
}
// Kludge -- Probably want an OffsetFn<T> here instead
template <class T, __CUTE_REQUIRES(is_integral<T>::value)>
CUTE_HOST_DEVICE constexpr
auto
right_inverse(T const& t)
{
return -t;
}
// Kludge -- Probably want an OffsetFn<T> here instead
template <class T, __CUTE_REQUIRES(is_integral<T>::value)>
CUTE_HOST_DEVICE constexpr
auto
left_inverse(T const& t)
{
return -t;
}
//
// Upcast and Downcast
//
template <int N, int B, int M, int S>
CUTE_HOST_DEVICE constexpr
auto
upcast(Swizzle<B,M,S> const& swizzle)
{
static_assert(has_single_bit(N), "N must be a power of two");
constexpr int log2_n = bit_width(uint32_t(N)) - 1;
constexpr int NewM = M - log2_n;
if constexpr (NewM >= 0) {
return Swizzle<B,NewM,S>{};
} else {
return Swizzle<cute::max(B+NewM,0), 0, S>{};
}
CUTE_GCC_UNREACHABLE;
}
template <int N, int B, int M, int S>
CUTE_HOST_DEVICE constexpr
auto
downcast(Swizzle<B,M,S> const& swizzle)
{
static_assert(has_single_bit(N), "N must be a power of two");
constexpr int log2_n = bit_width(uint32_t(N)) - 1;
return Swizzle<B,(M + log2_n),S>{};
}
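// Worked example: upcast<2>(Swizzle<3,4,3>{}) yields Swizzle<3,3,3> (elements viewed
// twice as wide), while downcast<2>(Swizzle<3,4,3>{}) yields Swizzle<3,5,3> (elements
// viewed half as wide).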
template <class OldType, class NewType,
int B, int M, int S>
CUTE_HOST_DEVICE constexpr
auto
recast_layout(Swizzle<B,M,S> const& swizzle)
{
using scale = decltype(trait_ratio(sizeof_bits<NewType>{}, sizeof_bits<OldType>{}));
if constexpr (scale::num == 1 && scale::den == 1) {
return swizzle;
}
else if constexpr (scale::num == 1) {
return downcast<scale::den>(swizzle);
}
else if constexpr (scale::den == 1) {
return upcast<scale::num>(swizzle);
}
else {
static_assert(dependent_false<scale>, "Recast not supported.");
}
CUTE_GCC_UNREACHABLE;
}
template <int B, int M, int S>
CUTE_HOST_DEVICE constexpr
auto
max_alignment(Swizzle<B,M,S> const&)
{
return Int<M>{};
}
template <int B, int M, int S, class Offset, class LayoutB>
CUTE_HOST_DEVICE constexpr
auto
max_alignment(ComposedLayout<Swizzle<B,M,S>,Offset,LayoutB> const& layout)
{
return gcd(max_alignment(layout.layout_a()),
max_alignment(layout.offset()),
max_alignment(layout.layout_b()));
}
//
// Other operations
//
template <int B, int M, int S, class Offset, class LayoutB, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
max_common_layout(ComposedLayout<Swizzle<B,M,S>,Offset,LayoutB> const& a,
Layout<Shape,Stride> const& b)
{
auto common = max_common_layout(a.layout_b(), b);
auto base = Int<(1 << M)>{};
if constexpr (base < size(common)) {
return common.compose(base); // Truncate common to size base
} else {
return common;
}
}
template <class Shape, class Stride, int B, int M, int S, class Offset, class LayoutB>
CUTE_HOST_DEVICE constexpr
auto
max_common_layout(Layout<Shape,Stride> const& a,
ComposedLayout<Swizzle<B,M,S>,Offset,LayoutB> const& b)
{
return max_common_layout(b, a);
}
template <int B, int M, int S, class Offset, class LayoutB, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
max_common_vector(ComposedLayout<Swizzle<B,M,S>,Offset,LayoutB> const& a,
Layout<Shape,Stride> const& b)
{
// This assumes that Offset is in the YZ domain of the Swizzle...
return cute::min(max_common_vector(a.layout_b(), b), Int<(1 << M)>{});
}
template <class Shape, class Stride, int B, int M, int S, class Offset, class LayoutB>
CUTE_HOST_DEVICE constexpr
auto
max_common_vector(Layout<Shape,Stride> const& a,
ComposedLayout<Swizzle<B,M,S>,Offset,LayoutB> const& b)
{
return max_common_vector(b, a);
}
template <int B0, int M0, int S0, class Offset0, class LayoutB0,
int B1, int M1, int S1, class Offset1, class LayoutB1>
CUTE_HOST_DEVICE constexpr
auto
max_common_vector(ComposedLayout<Swizzle<B0,M0,S0>,Offset0,LayoutB0> const& a,
ComposedLayout<Swizzle<B1,M1,S1>,Offset1,LayoutB1> const& b)
{
// Typical impl is composition(a, right_inverse(b))
// so this is Sw0 o B0 o rinv(Sw1 o B1) = Sw0 o B0 o rinv(B1) o Sw1
auto vec = max_common_vector(a.layout_b(), b.layout_b());
// This assumes that Offset is in the YZ domain of the Swizzle...
if constexpr (Swizzle<B0,M0,S0>{} == Swizzle<B1,M1,S1>{}) {
return vec;
} else {
return cute::min(vec, Int<(1 << M0)>{}, Int<(1 << M1)>{});
}
CUTE_GCC_UNREACHABLE;
}
///////////////////////////////////////////////////////////////////////////////
// ComposedLayout as second argument is often more difficult...
template <class Shape, class Stride,
int B, int M, int S, class Offset, class LayoutT>
CUTE_HOST_DEVICE constexpr
auto
logical_product(Layout<Shape,Stride> const& layout,
ComposedLayout<Swizzle<B,M,S>,Offset,LayoutT> const& tiler)
{
CUTE_STATIC_ASSERT_V(tiler.offset() == Int<0>{}, "Require Swizzle offset == 0.");
// The new layout -- if swizzle wasn't an issue, this is the result
// our goal is to determine a new swizzle for these strides
auto new_layout = logical_product(layout, tiler.layout_b());
// This is accomplished by identifying
// S o L :=: S? o L*
// We identify the "active" portion of S by computing (P o L)(c*) where P is a projection generated by S
// Then that active identifier is transformed through the layouts:
// L*(L[(P o L)(c*)])
// which is a new swizzle identifier for S?, the new swizzle
// Projections of the swizzle layout for composition, P
auto swizzle_only_zy = make_layout(make_shape (Int<(1 << M)>{}, Int<(1 << B)>{}, Int<(1 << (abs(S)-B))>{}, Int<(1 << B )>{}, Int<1>{}),
make_stride( Int<0>{}, Int<(1 << M)>{}, Int<0>{}, Int<(1 << (M+abs(S)))>{}, Int<0>{}));
// Compose with the tiler to get the swizzle projection, P o L [The Z and Y contributing portions of L]
auto layout_only_zy = composition(swizzle_only_zy, tiler.layout_b());
// Transform the end coordinate to get the active bits of the swizzle, (P o L)(c*)
auto swizzle_active_bits = layout_only_zy(size(layout_only_zy)-Int<1>{});
// Get the Z bit and the Y bits
auto active_Z = swizzle_active_bits & typename Swizzle<B,M,S>::zzz_msk{};
auto active_Y = swizzle_active_bits & typename Swizzle<B,M,S>::yyy_msk{};
// Pass the identifiers through the old layout and new layout to make a new swizzle identifier, L*(L[(P o L)(c*)])
auto new_active_Z = new_layout(Int<0>{}, tiler.layout_b()[active_Z]);
auto new_active_Y = new_layout(Int<0>{}, tiler.layout_b()[active_Y]);
  // Use this new swizzle identifier to construct the new swizzle for new_layout
// (this also makes sure it's a "valid" swizzle that Swizzle can represent)
return composition(make_swizzle<new_active_Y,new_active_Z>(), new_layout);
}
} // end namespace cute
| include/cute/swizzle_layout.hpp/0 | {
"file_path": "include/cute/swizzle_layout.hpp",
"repo_id": "include",
"token_count": 8507
} | 14 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#include "cutlass/arch/mma.h"
#include "cutlass/complex.h"
#include "cutlass/quaternion.h"
#include "cutlass/functional.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, float, LayoutA, float, LayoutB, float, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = float;
CUTLASS_HOST_DEVICE
void operator()(
Array<float, 1> &d,
Array<float, 1> const &a,
Array<float, 1> const &b,
Array<float, 1> const &c
) {
d[0] = a[0] * b[0] + c[0];
}
};
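// Illustrative sketch: invoking the scalar FMA specialization directly. The layout
// parameters do not affect the 1x1x1 computation and are chosen arbitrarily here.
//
//   Mma<gemm::GemmShape<1, 1, 1>, 1,
//       float, layout::RowMajor,
//       float, layout::ColumnMajor,
//       float, layout::RowMajor, OpMultiplyAdd> mma;
//   Array<float, 1> d, a, b, c;
//   a[0] = 2.0f; b[0] = 3.0f; c[0] = 1.0f;
//   mma(d, a, b, c);   // d[0] == 7.0f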
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, double, LayoutA, double, LayoutB, double, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = double;
CUTLASS_HOST_DEVICE
void operator()(
Array<double, 1> &d,
Array<double, 1> const &a,
Array<double, 1> const &b,
Array<double, 1> const &c
) {
d[0] = a[0] * b[0] + c[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, int, LayoutA, int, LayoutB, int, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = int;
CUTLASS_HOST_DEVICE
void operator()(
Array<int, 1> &d,
Array<int, 1> const &a,
Array<int, 1> const &b,
Array<int, 1> const &c
) {
d[0] = a[0] * b[0] + c[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
complex<float>,
LayoutA,
complex<float>,
LayoutB,
complex<float>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<float>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<float>, 1> &d,
Array<complex<float>, 1> const &a,
Array<complex<float>, 1> const &b,
Array<complex<float>, 1> const &c
) {
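    // Complex FMA expressed as four scalar FMAs: first accumulate the
    // (a.real*b.real, a.imag*b.real) terms onto c, then fold in the
    // (-a.imag*b.imag, a.real*b.imag) cross terms.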
d[0].real() = a[0].real() * b[0].real() + c[0].real();
d[0].imag() = a[0].imag() * b[0].real() + c[0].imag();
d[0].real() = -a[0].imag() * b[0].imag() + d[0].real();
d[0].imag() = a[0].real() * b[0].imag() + d[0].imag();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
complex<float>,
LayoutA,
float,
LayoutB,
complex<float>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<float>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<float>, 1> &d,
Array<complex<float>, 1> const &a,
Array<float, 1> const &b,
Array<complex<float>, 1> const &c
) {
d[0].real() = a[0].real() * b[0] + c[0].real();
d[0].imag() = a[0].imag() * b[0] + c[0].imag();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
float,
LayoutA,
complex<float>,
LayoutB,
complex<float>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<float>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<float>, 1> &d,
Array<float, 1> const &a,
Array<complex<float>, 1> const &b,
Array<complex<float>, 1> const &c
) {
d[0].real() = a[0] * b[0].real() + c[0].real();
    d[0].imag() = a[0] * b[0].imag() + c[0].imag();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
complex<double>,
LayoutA,
complex<double>,
LayoutB,
complex<double>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<double>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<double>, 1> &d,
Array<complex<double>, 1> const &a,
Array<complex<double>, 1> const &b,
Array<complex<double>, 1> const &c
) {
d[0].real() = a[0].real() * b[0].real() + c[0].real();
d[0].imag() = a[0].imag() * b[0].real() + c[0].imag();
d[0].real() = -a[0].imag() * b[0].imag() + d[0].real();
d[0].imag() = a[0].real() * b[0].imag() + d[0].imag();
}
};
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
complex<double>,
LayoutA,
double,
LayoutB,
complex<double>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<double>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<double>, 1> &d,
Array<complex<double>, 1> const &a,
Array<double, 1> const &b,
Array<complex<double>, 1> const &c
) {
d[0].real() = a[0].real() * b[0] + c[0].real();
d[0].imag() = a[0].imag() * b[0] + c[0].imag();
}
};
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
double,
LayoutA,
complex<double>,
LayoutB,
complex<double>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<double>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<double>, 1> &d,
Array<double, 1> const &a,
Array<complex<double>, 1> const &b,
Array<complex<double>, 1> const &c
) {
d[0].real() = a[0] * b[0].real() + c[0].real();
    d[0].imag() = a[0] * b[0].imag() + c[0].imag();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, half_t, LayoutA, half_t, LayoutB, float, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = float;
CUTLASS_HOST_DEVICE
void operator()(
Array<float, 1> &d,
Array<half_t, 1> const &a,
Array<half_t, 1> const &b,
Array<float, 1> const &c
) {
d[0] = float(a[0]) * float(b[0]) + c[0];
}
};
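// Usage sketch (illustrative, not part of the original header): fp16 multiplicands with an
// fp32 accumulator. The layouts are arbitrary placeholders.
//
//   using MmaF16 = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<1, 1, 1>, 1,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   cutlass::Array<cutlass::half_t, 1> a, b;
//   cutlass::Array<float, 1> d, c;
//   a[0] = cutlass::half_t(1.5f); b[0] = cutlass::half_t(2.0f); c[0] = 0.25f;
//   MmaF16{}(d, a, b, c);   // d[0] == 3.25f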
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation for Quaternions
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, Quaternion<float>, LayoutA, Quaternion<float>, LayoutB, Quaternion<float>, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using Element = Quaternion<float>;
using ElementC = Element;
CUTLASS_HOST_DEVICE
void operator()(
Array<Element, 1> &d,
Array<Element, 1> const &a,
Array<Element, 1> const &b,
Array<Element, 1> const &c
) {
multiply_add<Element, Element, Element> op;
d[0] = op(a[0], b[0], c[0]);
}
};
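// Usage sketch (illustrative, not part of the original header): the quaternion specialization
// delegates to multiply_add<Quaternion<float>, ...>, which expands the Hamilton product
// (16 real multiplies) and then accumulates c. Layouts are placeholders.
//
//   using MmaQ = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<1, 1, 1>, 1,
//       cutlass::Quaternion<float>, cutlass::layout::ColumnMajor,
//       cutlass::Quaternion<float>, cutlass::layout::ColumnMajor,
//       cutlass::Quaternion<float>, cutlass::layout::ColumnMajor,
//       cutlass::arch::OpMultiplyAdd>;
//   // MmaQ{}(d, a, b, c) computes d[0] = a[0] * b[0] + c[0].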
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/arch/mma_sm50.h/0 | {
"file_path": "include/cutlass/arch/mma_sm50.h",
"repo_id": "include",
"token_count": 3887
} | 15 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/layout/matrix.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
//
// WMMA template structure defines nvcuda::wmma::fragments and static assert for
// wmma native instruction sizes supported for int8_t
//
////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename LayoutA_,
typename LayoutB_,
typename LayoutC_>
struct Wmma<
Shape_, ///< Size of the matrix product (concept: GemmShape)
int8_t, ///< ElementA
LayoutA_, ///< LayoutA
int8_t, ///< ElementB
LayoutB_, ///< LayoutB
int32_t, ///< ElementC
LayoutC_, ///< LayoutC
cutlass::arch::OpMultiplyAdd ///< Operator (multiply-add, xor.popc)
> {
#if defined(CUTLASS_ARCH_WMMA_SM72_ENABLED)
using Shape = Shape_;
using ElementA = int8_t;
using LayoutA = LayoutA_;
using ElementB = int8_t;
using LayoutB = LayoutB_;
using ElementC = int32_t;
using LayoutC = LayoutC_;
using Operator = cutlass::arch::OpMultiplyAdd;
using ArchTag = arch::Sm72;
// check supported wmma shape for the given multiplicand data types
static_assert(
platform::is_same<cutlass::gemm::GemmShape<16, 16, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape< 8, 32, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape<32, 8, 16>, Shape>::value,
"Supported list of wmma operator shape for s8 multiplicands are: 16x16x16, 8x32x16, and 32x8x16");
// Wmma Fragment
using FragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementA>::Type,
typename CutlassToWmmaLayout<LayoutA>::Layout>;
using FragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementB>::Type,
typename CutlassToWmmaLayout<LayoutB>::Layout>;
using FragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementC>::Type>;
/// Performs a nvcuda::wmma matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
nvcuda::wmma::mma_sync(D, A, B, C);
}
#else
static_assert(false, "wmma.mma.sync interger type multiplicands is avialable only for SM72 and beyond");
#endif
};
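// Usage sketch (illustrative, not part of the original header; assumes a target compiled with
// CUTLASS_ARCH_WMMA_SM72_ENABLED). The fragment types wrap nvcuda::wmma fragments and
// operator() forwards to nvcuda::wmma::mma_sync.
//
//   using WmmaS8 = cutlass::arch::Wmma<
//       cutlass::gemm::GemmShape<16, 16, 16>,
//       int8_t,  cutlass::layout::RowMajor,
//       int8_t,  cutlass::layout::ColumnMajor,
//       int32_t, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   __device__ void accumulate_tile(typename WmmaS8::FragmentC &D,
//                                   typename WmmaS8::FragmentA const &A,
//                                   typename WmmaS8::FragmentB const &B,
//                                   typename WmmaS8::FragmentC const &C) {
//     WmmaS8 wmma_op;
//     wmma_op(D, A, B, C);   // one warp-wide 16x16x16 s8 x s8 + s32 accumulation
//   }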
////////////////////////////////////////////////////////////////////////////////
//
// WMMA template structure defines nvcuda::wmma::fragments and static assert for
// wmma native instruction sizes supported for uint8_t
//
////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename LayoutA_,
typename LayoutB_,
typename LayoutC_>
struct Wmma<
Shape_, ///< Size of the matrix product (concept: GemmShape)
uint8_t, ///< ElementA
LayoutA_, ///< LayoutA
uint8_t, ///< ElementB
LayoutB_, ///< LayoutB
int32_t, ///< ElementC
LayoutC_, ///< LayoutC
cutlass::arch::OpMultiplyAdd ///< Operator (multiply-add, xor.popc)
> {
#if defined(CUTLASS_ARCH_WMMA_SM72_ENABLED)
using Shape = Shape_;
using ElementA = uint8_t;
using LayoutA = LayoutA_;
using ElementB = uint8_t;
using LayoutB = LayoutB_;
using ElementC = int32_t;
using LayoutC = LayoutC_;
using Operator = cutlass::arch::OpMultiplyAdd;
using ArchTag = arch::Sm72;
// check supported wmma shape for the given multiplicand data types
static_assert(
platform::is_same<cutlass::gemm::GemmShape<16, 16, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape< 8, 32, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape<32, 8, 16>, Shape>::value,
"Supported list of wmma operator shape for u8 multiplicands are: 16x16x16, 8x32x16, and 32x8x16");
// Wmma Fragment
using FragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementA>::Type,
typename CutlassToWmmaLayout<LayoutA>::Layout>;
using FragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementB>::Type,
typename CutlassToWmmaLayout<LayoutB>::Layout>;
using FragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementC>::Type>;
/// Performs a nvcuda::wmma matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
nvcuda::wmma::mma_sync(D, A, B, C);
}
#else
static_assert(false, "wmma.mma.sync interger type multiplicands is avialable only for SM72 and beyond");
#endif
};
} // namespace arch
} // namespace cutlass
| include/cutlass/arch/wmma_sm72.h/0 | {
"file_path": "include/cutlass/arch/wmma_sm72.h",
"repo_id": "include",
"token_count": 3101
} | 16 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
    Default kernel-level fused activation scale+bias+relu and implicit GEMM convolution
definitions that combine threadblock-scoped matrix multiply-add with the
appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/predicated_scale_bias_vector_access_iterator.h"
#include "cutlass/transform/threadblock/regular_scale_bias_vector_access_iterator.h"
#include "cutlass/gemm/warp/scale_bias_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for fused batch norm and Conv2dFprop
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementScaleBias,
typename LayoutScaleBias,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized,
conv::StrideSupport StrideSupport = StrideSupport::kUnity
> struct DefaultConv2dFpropFusion;
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassTensorOp convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage
/// pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementScaleBias,
typename LayoutScaleBias,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag
>
struct DefaultConv2dFpropFusion <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementScaleBias,
LayoutScaleBias,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
/// Define iterators over tiles from scale/bias vectors
using IteratorScaleBias =
cutlass::conv::threadblock::PredicatedScaleBiasVectorAccessIterator<
cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias,
LayoutScaleBias>;
using SmemIteratorScaleBias =
cutlass::transform::threadblock::RegularScaleBiasVectorAccessIterator<
cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias,
LayoutScaleBias>;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static int const kThreadCount = 32;
// Warp-level iterators to load scale and bias vectors
using WarpIteratorScaleBias = cutlass::gemm::warp::ScaleBiasTileIterator<
MatrixShape<WarpShape::kM, WarpShape::kK>, ElementScaleBias,
LayoutScaleBias, MatrixShape<InstructionShape::kM, InstructionShape::kK>,
typename WarpMmaTensorOp::IteratorA::Base::Policy, kThreadCount,
MmaCore::WarpCount::kK>;
// Define the Mma
using Mma = threadblock::ImplicitGemmFpropFusionMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Global,
IteratorScaleBias,
SmemIteratorScaleBias,
arch::CacheOperation::Always,
MmaPolicy,
WarpIteratorScaleBias,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
1,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionFusion<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and
/// multistage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementScaleBias,
typename LayoutScaleBias,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag
>
struct DefaultConv2dFpropFusion <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementScaleBias,
LayoutScaleBias,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag
>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
LayoutA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
LayoutB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
/// Define iterators over tiles from scale/bias vectors
using IteratorScaleBias =
cutlass::conv::threadblock::PredicatedScaleBiasVectorAccessIterator<
cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias,
LayoutScaleBias>;
using SmemIteratorScaleBias =
cutlass::transform::threadblock::RegularScaleBiasVectorAccessIterator<
cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias,
LayoutScaleBias>;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static int const kThreadCount = 32;
// Warp-level iterators to load scale and bias vectors
using WarpIteratorScaleBias = cutlass::gemm::warp::ScaleBiasTileIterator<
MatrixShape<WarpShape::kM, WarpShape::kK>, ElementScaleBias,
LayoutScaleBias, MatrixShape<InstructionShape::kM, InstructionShape::kK>,
typename WarpMmaTensorOp::IteratorA::Base::Policy, kThreadCount,
MmaCore::WarpCount::kK>;
// Define the Mma
using Mma = threadblock::ImplicitGemmFpropFusionMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Global,
IteratorScaleBias,
SmemIteratorScaleBias,
arch::CacheOperation::Always,
MmaPolicy,
WarpIteratorScaleBias,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
1,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionFusion<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
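// Example instantiation (illustrative sketch; the concrete element types, tile shapes, arch tag,
// epilogue, and stage count below are assumptions and are not mandated by this header):
//
//   using FusionKernel = typename cutlass::conv::kernel::DefaultConv2dFpropFusion<
//       cutlass::half_t, cutlass::layout::TensorNHWC,     // ElementA / LayoutA (activations)
//       cutlass::half_t, cutlass::layout::TensorNHWC,     // ElementB / LayoutB (filters)
//       cutlass::half_t, cutlass::layout::RowMajor,       // ElementScaleBias / LayoutScaleBias
//       cutlass::half_t, cutlass::layout::TensorNHWC,     // ElementC / LayoutC (output)
//       float,                                            // ElementAccumulator
//       cutlass::arch::OpClassTensorOp,
//       cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 128, 32>,           // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 32>,             // warp tile
//       cutlass::gemm::GemmShape<16, 8, 16>,              // tensor core instruction
//       cutlass::epilogue::thread::LinearCombinationRelu<
//           cutlass::half_t, 8, float, float>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       4,                                                // stages
//       cutlass::arch::OpMultiplyAdd,
//       cutlass::conv::IteratorAlgorithm::kOptimized
//   >::Kernel;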
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/kernel/default_conv2d_fprop_fusion.h/0 | {
"file_path": "include/cutlass/conv/kernel/default_conv2d_fprop_fusion.h",
"repo_id": "include",
"token_count": 3829
} | 17 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
    Default kernel-level Depthwise implicit GEMM convolution definitions that combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/kernel/direct_convolution.h"
#include "cutlass/conv/threadblock/depthwise_mma_core_with_lane_access_size.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/depthwise_fprop_pipelined.h"
// Direct Conv Related Header files
#include "cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_optimized.h"
#include "cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_fixed_stride_dilation.h"
#include "cutlass/conv/threadblock/depthwise_fprop_filter_tile_access_iterator_direct_conv_optimized.h"
#include "cutlass/conv/threadblock/depthwise_fprop_direct_conv_multistage.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for DepthwiseFprop
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic,
conv::StrideSupport StrideSupport = StrideSupport::kUnity,
/// Access granularity of A matrix in units of elements
int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value,
/// Access granularity of B matrix in units of elements
int AlignmentB = cutlass::sizeof_bits<ElementB>::value / cutlass::sizeof_bits<ElementB>::value
> struct DefaultDepthwiseFprop;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for DepthwiseFprop with direct convolution algorithm
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape,
typename ThreadBlockOutputShape,
typename FilterShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic,
conv::StrideSupport StrideSupport = StrideSupport::kUnity,
// MatrixShape<Height, Width>
typename StrideShape = cutlass::MatrixShape<-1, -1>,
// MatrixShape< Height, Width>
typename DilationShape = cutlass::MatrixShape<-1, -1>,
/// Access granularity of A matrix in units of elements
int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value,
/// Access granularity of B matrix in units of elements
int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value
> struct DefaultDepthwiseDirect2dConvFprop;
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassSimt convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Depthwise specialization for Analytic IteratorAlgorithm
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultDepthwiseFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag, // cutlass::arch::OpMultiplyAdd
IteratorAlgorithm::kAnalytic,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::conv::threadblock::DepthwiseMmaCoreWithLaneAccessSize<
ThreadblockShape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::ColumnMajor,
ElementAccumulator,
layout::RowMajor,
arch::OpClassSimt,
128,
sizeof_bits<ElementB>::value,
2,
MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB,
cutlass::conv::GroupMode::kDepthwise
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::DepthwiseFpropPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv2dProblemSize,
cutlass::conv::GroupMode::kDepthwise
>;
};
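// Example instantiation (illustrative sketch; the tile shapes, arch tag, and epilogue are
// placeholders — this specialization itself only requires OpClassSimt, two stages, and the
// analytic iterator algorithm):
//
//   using DepthwiseKernel = typename cutlass::conv::kernel::DefaultDepthwiseFprop<
//       cutlass::half_t, cutlass::layout::TensorNHWC,     // activations
//       cutlass::half_t, cutlass::layout::TensorNHWC,     // filters
//       cutlass::half_t, cutlass::layout::TensorNHWC,     // output
//       float,                                            // accumulator
//       cutlass::arch::OpClassSimt,
//       cutlass::arch::Sm70,
//       cutlass::gemm::GemmShape<64, 64, 8>,              // threadblock tile
//       cutlass::gemm::GemmShape<32, 32, 8>,              // warp tile
//       cutlass::gemm::GemmShape<1, 1, 1>,                // SIMT instruction shape
//       cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 1, float, float>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       2,                                                // stages (fixed by this specialization)
//       cutlass::arch::OpMultiplyAdd
//   >::Kernel;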
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Depthwise specialization for direct 2d conv implementation,
/// multiple stage pipeline, and SIMT-based mainloop
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename ThreadBlockOutputShape,
typename FilterShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
typename StrideShape,
typename DilationShape,
int AlignmentA,
int AlignmentB
>
struct DefaultDepthwiseDirect2dConvFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
ThreadBlockOutputShape,
FilterShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport,
StrideShape,
DilationShape,
AlignmentA,
AlignmentB
> {
  // One warp handles all of the groups assigned to a CTA.
  static_assert(ThreadblockShape::kN == WarpShape::kN,
                "ThreadblockShape::kN should be the same as WarpShape::kN");
  static_assert(ThreadblockShape::kK == FilterShape::kCount && WarpShape::kK == FilterShape::kCount,
                "ThreadblockShape::kK and WarpShape::kK should be the same as the filter size");
  static_assert(ThreadblockShape::kM % WarpShape::kM == 0,
                "ThreadblockShape::kM must be divisible by WarpShape::kM");
  static_assert(ThreadBlockOutputShape::kN == 1, "ThreadBlockOutputShape::kN should be 1");
// Define the core components from GEMM
using MmaCore = typename cutlass::conv::threadblock::DepthwiseDirectConvMmaCoreWithLaneAccessSize<
ThreadblockShape,
ThreadBlockOutputShape,
FilterShape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::ColumnMajor,
ElementAccumulator,
layout::RowMajor,
arch::OpClassSimt,
128,
128,
Stages,
MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM,ThreadblockShape::kN>, // < outputShape:KMNK, groups per cta>
ThreadBlockOutputShape,
ElementA, LayoutA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kN, FilterShape::kCount>,
ElementB, LayoutB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
using ThreadOutputShape = typename MmaCore::ThreadOutputShape;
static cutlass::arch::CacheOperation::Kind const CacheOpA =
((sizeof_bits<ElementA>::value * AlignmentA) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultDirectConvEpilogueSimt<
ThreadblockShape, // < outputShape:KMNK, groups per cta>
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
ThreadOutputShape,
ThreadBlockOutputShape
>::Epilogue;
// Define the Mma
using Mma = threadblock::DepthwiseFpropDirectConvMultipleStage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
CacheOpA,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages,
Epilogue
>;
// Define the kernel
using Kernel = cutlass::conv::kernel::DirectConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv2dProblemSize,
cutlass::conv::GroupMode::kDepthwise,
ThreadBlockOutputShape
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Depthwise specialization for direct 2d conv implementation,
/// multiple stage pipeline, and SIMT-based mainloop
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename ThreadBlockOutputShape,
typename FilterShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
typename StrideShape,
typename DilationShape,
int AlignmentA,
int AlignmentB
>
struct DefaultDepthwiseDirect2dConvFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
ThreadBlockOutputShape,
FilterShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kFixedStrideDilation,
StrideSupport,
StrideShape,
DilationShape,
AlignmentA,
AlignmentB
> {
  // One warp handles all of the groups assigned to a CTA.
  static_assert(ThreadblockShape::kN == WarpShape::kN,
                "ThreadblockShape::kN should be the same as WarpShape::kN");
  static_assert(ThreadblockShape::kK == FilterShape::kCount && WarpShape::kK == FilterShape::kCount,
                "ThreadblockShape::kK and WarpShape::kK should be the same as the filter size");
  static_assert(ThreadblockShape::kM % WarpShape::kM == 0,
                "ThreadblockShape::kM must be divisible by WarpShape::kM");
  static_assert(ThreadBlockOutputShape::kN == 1, "ThreadBlockOutputShape::kN should be 1");
static_assert(StrideShape::kRow >= 0 && StrideShape::kColumn >= 0, "Stride should be fixed");
static_assert(DilationShape::kRow >= 0 && DilationShape::kColumn >= 0, "Stride should be fixed");
// Activations loaded by threadblock
static int const ActivationShapeH = (ThreadBlockOutputShape::kH - 1) * StrideShape::kRow +
(FilterShape::kRow - 1) * DilationShape::kRow + 1;
static int const ActivationShapeW = (ThreadBlockOutputShape::kW - 1) * StrideShape::kColumn +
(FilterShape::kColumn - 1) * DilationShape::kColumn + 1;
using ActivationShape =
cutlass::conv::TensorNHWCShape<1, ActivationShapeH, ActivationShapeW, ThreadblockShape::kN >;
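  // Worked example (illustrative): with ThreadBlockOutputShape = <1, 4, 4, C>,
  // FilterShape = <3, 3>, StrideShape = <2, 2>, and DilationShape = <1, 1>, each threadblock
  // must stage (4 - 1) * 2 + (3 - 1) * 1 + 1 = 9 activation rows and 9 activation columns,
  // i.e. ActivationShape = TensorNHWCShape<1, 9, 9, ThreadblockShape::kN>.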
// Define the core components from GEMM
using MmaCore = typename cutlass::conv::threadblock::DepthwiseDirectConvMmaCoreWithLaneAccessSize<
ThreadblockShape,
ThreadBlockOutputShape,
FilterShape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::ColumnMajor,
ElementAccumulator,
layout::RowMajor,
arch::OpClassSimt,
128,
128,
Stages,
MathOperatorTag,
IteratorAlgorithm::kFixedStrideDilation,
StrideShape,
DilationShape,
ActivationShape>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation<
cutlass::MatrixShape<ThreadblockShape::kM,ThreadblockShape::kN>, // < outputShape:KMNK, groups per cta>
ThreadBlockOutputShape,
StrideShape,
DilationShape,
ActivationShape,
ElementA, LayoutA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kN, FilterShape::kCount>,
ElementB, LayoutB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
using ThreadOutputShape = typename MmaCore::ThreadOutputShape;
static cutlass::arch::CacheOperation::Kind const CacheOpA =
((sizeof_bits<ElementA>::value * AlignmentA) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultDirectConvEpilogueSimt<
ThreadblockShape, // < outputShape:KMNK, groups per cta>
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
ThreadOutputShape,
ThreadBlockOutputShape
>::Epilogue;
// Define the Mma
using Mma = threadblock::DepthwiseFpropDirectConvMultipleStage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
CacheOpA,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages,
Epilogue,
IteratorAlgorithm::kFixedStrideDilation
>;
// Define the kernel
using Kernel = cutlass::conv::kernel::DirectConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv2dProblemSize,
cutlass::conv::GroupMode::kDepthwise,
ThreadBlockOutputShape
>;
};
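// Example instantiation (illustrative sketch; EpilogueOp and Swizzle stand for user-chosen
// epilogue and threadblock-swizzle types, and the shapes and arch tag are placeholders chosen
// to satisfy the static_asserts above — kN matches between threadblock and warp tiles, kK
// equals FilterShape::kCount, and the stride/dilation are compile-time fixed):
//
//   using FilterShape = cutlass::MatrixShape<3, 3>;
//   using ThreadBlockOutputShape = cutlass::conv::TensorNHWCShape<1, 8, 8, 64>;
//   using DirectConvKernel = typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConvFprop<
//       cutlass::half_t, cutlass::layout::TensorNHWC,
//       cutlass::half_t, cutlass::layout::TensorNHWC,
//       cutlass::half_t, cutlass::layout::TensorNHWC,
//       float,
//       cutlass::arch::OpClassSimt,
//       cutlass::arch::Sm60,
//       cutlass::gemm::GemmShape<64, 64, FilterShape::kCount>,   // threadblock tile
//       ThreadBlockOutputShape,
//       FilterShape,
//       cutlass::gemm::GemmShape<16, 64, FilterShape::kCount>,   // warp tile
//       cutlass::gemm::GemmShape<1, 1, 1>,
//       EpilogueOp, Swizzle,
//       3,                                                       // stages
//       cutlass::arch::OpMultiplyAdd,
//       cutlass::conv::IteratorAlgorithm::kFixedStrideDilation,
//       cutlass::conv::StrideSupport::kStrided,
//       cutlass::MatrixShape<1, 1>,                              // StrideShape
//       cutlass::MatrixShape<1, 1>                               // DilationShape
//   >::Kernel;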
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/kernel/default_depthwise_fprop.h/0 | {
"file_path": "include/cutlass/conv/kernel/default_depthwise_fprop.h",
"repo_id": "include",
"token_count": 6502
} | 18 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNHWC or TensorNCxHWx<Interleave> layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dFpropActivationTileAccessIteratorOptimized {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
using Mask = uint64_t;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dFpropActivationIteratorOptimizedParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
// One pointer per access
char const *pointer_[ThreadMap::Iterations::kStrided];
// current filter position (r, s)
int filter_r_;
int filter_s_;
int filter_c_;
Index masks_[ThreadMap::Iterations::kStrided][kAccessesPerVector][2];
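  // Bit layout (descriptive note): masks_[s][v][0] packs one predicate bit per filter row,
  // bit r being set when the input row implied by filter position r is in bounds for the
  // s-th strided access; masks_[s][v][1] does the same per filter column. valid() tests the
  // bits selected by (filter_r_, filter_s_), and can_implement() rejects R > 32 or S > 32
  // because each mask word only holds 32 predicate bits.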
public:
CUTLASS_HOST_DEVICE
Conv2dFpropActivationTileAccessIteratorOptimized(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
):
params_(params),
problem_size_(problem_size),
filter_c_(0),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.column() + thread_coord.contiguous();
int offset_n[ThreadMap::Iterations::kStrided];
int offset_p[ThreadMap::Iterations::kStrided];
int offset_q[ThreadMap::Iterations::kStrided];
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] = reinterpret_cast<char const *>(ptr);
int offset_npq = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
      // The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// offset_n[s] = offset_npq / (problem_size_.P * problem_size_.Q);
// int residual = offset_npq % (problem_size_.P * problem_size_.Q);
//
// offset_p[s] = residual / problem_size_.Q;
// offset_q[s] = residual % problem_size_.Q;
//
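      // For example (illustrative), with P = Q = 56 an offset_npq of 3200 yields
      // offset_n[s] = 1, offset_p[s] = 1, offset_q[s] = 8, since 3200 = 1 * 3136 + 64
      // and 64 = 1 * 56 + 8.
      //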
int residual;
params.pq_divmod(offset_n[s], residual, offset_npq);
params.q_divmod(offset_p[s], offset_q[s], residual);
TensorCoord coord = at_(offset_n[s], offset_p[s], offset_q[s], 0, 0);
pointer_[s] += params_.layout(coord) * sizeof_bits<Element>::value / 8;
}
clear_mask();
CUTLASS_PRAGMA_NO_UNROLL
for (int r = 0; r < problem_size_.R; ++r) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int r_ = r;
if (problem_size_.mode == Mode::kConvolution) {
r_ = problem_size_.R - 1 - r;
}
int h = offset_p[s_idx] * problem_size_.stride_h - problem_size_.pad_h + r_ * problem_size_.dilation_h;
bool pred = (offset_n[s_idx] < problem_size_.N && h >= 0 && h < problem_size_.H);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][0] |= (pred << r);
}
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int s = 0; s < problem_size_.S; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int s_ = s;
if (problem_size_.mode == Mode::kConvolution) {
s_ = problem_size_.S - 1 - s;
}
int w = offset_q[s_idx] * problem_size_.stride_w - problem_size_.pad_w + s_ * problem_size_.dilation_w;
bool pred = (w >= 0 && w < problem_size_.W);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][1] |= (pred << s);
}
}
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, filter_c_ + v_idx * AccessType::kElements >= problem_size_.C);
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided});
}
private:
  /// Returns the coordinate in the activations tensor X that corresponds to
// output npq and filter position r, s
CUTLASS_HOST_DEVICE
TensorCoord at_(int n, int p, int q, int r, int s) const {
if (problem_size_.mode == Mode::kConvolution) {
r = problem_size_.R - 1 - r;
s = problem_size_.S - 1 - s;
}
int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h;
int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w;
return TensorCoord(n, h, w, filter_c_);
}
  /// Adds a pointer offset in units of bytes
CUTLASS_HOST_DEVICE
void add_byte_offset_(LongIndex byte_offset) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] += byte_offset;
}
}
public:
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
  /// Adds a pointer offset in units of elements
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
add_byte_offset_(pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_HOST_DEVICE
void advance() {
int next_idx = 0;
// moves to the next tile
++filter_s_;
if (filter_s_ == problem_size_.S) {
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
next_idx = 1;
}
else {
filter_r_ = 0;
next_idx = 2;
}
}
add_byte_offset_(params_.inc_next[next_idx]);
if (next_idx == 2) {
filter_c_ += params_.filter_c_delta;
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, filter_c_ + v_idx * AccessType::kElements >= problem_size_.C);
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
masks_[s][v][0] = clear ? 0 : masks_[s][v][0];
masks_[s][v][1] = clear ? 0 : masks_[s][v][1];
}
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(int v, bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
masks_[s][v][0] = clear ? 0 : masks_[s][v][0];
masks_[s][v][1] = clear ? 0 : masks_[s][v][1];
}
}
CUTLASS_HOST_DEVICE
bool valid() {
return
(masks_[iteration_strided_][iteration_vector_][0] & (Index(1) << filter_r_)) &&
(masks_[iteration_strided_][iteration_vector_][1] & (Index(1) << filter_s_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_[iteration_strided_]) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dFpropActivationTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if ((problem_size.C / problem_size.groups) % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
if (platform::is_same<Layout, layout::TensorNCxHWx<32>>::value) {
if (problem_size.C % 32) {
return Status::kErrorInvalidProblem;
}
}
if (platform::is_same<Layout, layout::TensorNCxHWx<64>>::value) {
if (problem_size.C % 64) {
return Status::kErrorInvalidProblem;
}
}
    // Conv2dFpropActivationTileAccessIteratorOptimized has a constraint on filter positions
// due to the number of mask bits.
if (problem_size.R > 32 || problem_size.S > 32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h",
"repo_id": "include",
"token_count": 5193
} | 19 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_
>
class Conv3dFpropActivationTileAccessIteratorOptimized {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
using Mask = uint64_t;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv3dFpropActivationIteratorOptimizedParams<Layout>;
private:
Conv3dFpropActivationIteratorOptimizedParams<Layout> const ¶ms_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
// One pointer per access
char const *pointer_[ThreadMap::Iterations::kStrided];
// current filter position (t, r, s)
int filter_t_;
int filter_r_;
int filter_s_;
int filter_c_;
// mask for t, r, and s
Index masks_[ThreadMap::Iterations::kStrided][3];
public:
CUTLASS_HOST_DEVICE
Conv3dFpropActivationTileAccessIteratorOptimized(
Conv3dFpropActivationIteratorOptimizedParams<Layout> const ¶ms,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
) :
params_(params),
problem_size_(problem_size),
filter_t_(0),
filter_r_(0),
filter_s_(0),
filter_c_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.column() + thread_coord.contiguous();
int offset_n[ThreadMap::Iterations::kStrided];
int offset_z[ThreadMap::Iterations::kStrided];
int offset_p[ThreadMap::Iterations::kStrided];
int offset_q[ThreadMap::Iterations::kStrided];
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] = reinterpret_cast<char const *>(ptr);
int offset_nzpq = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
      // The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// offset_n[s] = offset_nzpq / (problem_size_.Z * problem_size_.P * problem_size_.Q);
// int residual = offset_nzpq % (problem_size_.Z * problem_size_.P * problem_size_.Q);
//
// offset_z[s] = residual / (problem_size_.P * problem_size_.Q);
// residual = residual % (problem_size_.P * problem_size_.Q);
//
// offset_p[s] = residual / problem_size_.Q;
// offset_q[s] = residual % problem_size_.Q;
//
int residual;
      // input: (nzpq offset) output: (n offset and residual (zpq offset))
params.zpq_divmod(offset_n[s], residual, offset_nzpq);
// input: (zpq offset) output: (z offset and resudial (pq))
params.pq_divmod(offset_z[s], residual, residual);
// input: (pq offset) output: (p offset and resudial (q offset))
params.q_divmod(offset_p[s], offset_q[s], residual);
TensorCoord coord = at_(offset_n[s], offset_z[s], offset_p[s], offset_q[s], 0, 0, 0);
pointer_[s] += params_.layout(coord) * sizeof_bits<Element>::value / 8;
}
clear_mask();
// mask predicates for filter position T
CUTLASS_PRAGMA_NO_UNROLL
for (int t = 0; t < problem_size_.T; ++t) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int t_ = t;
if (problem_size_.mode == Mode::kConvolution) {
t_ = problem_size_.T - 1 - t;
}
int d = offset_z[s_idx] * problem_size_.stride_d - problem_size_.pad_d + t_ * problem_size_.dilation_d;
bool pred = (offset_n[s_idx] < problem_size_.N && d >= 0 && d < problem_size_.D);
masks_[s_idx][0] |= (pred << t);
}
}
// mask predicates for filter position R
CUTLASS_PRAGMA_NO_UNROLL
for (int r = 0; r < problem_size_.R; ++r) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int r_ = r;
if (problem_size_.mode == Mode::kConvolution) {
r_ = problem_size_.R - 1 - r;
}
int h = offset_p[s_idx] * problem_size_.stride_h - problem_size_.pad_h + r_ * problem_size_.dilation_h;
bool pred = (h >= 0 && h < problem_size_.H);
masks_[s_idx][1] |= (pred << r);
}
}
// mask predicates for filter position S
CUTLASS_PRAGMA_NO_UNROLL
for (int s = 0; s < problem_size_.S; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int s_ = s;
if (problem_size_.mode == Mode::kConvolution) {
s_ = problem_size_.S - 1 - s;
}
int w = offset_q[s_idx] * problem_size_.stride_w - problem_size_.pad_w + s_ * problem_size_.dilation_w;
bool pred = (w >= 0 && w < problem_size_.W);
masks_[s_idx][2] |= (pred << s);
}
}
if (filter_c_ >= problem_size.C) {
clear_mask();
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided});
}
private:
/// Returns the coordinate in the activations tensor X corresponding to
/// output nzpq and filter position (t, r, s)
CUTLASS_HOST_DEVICE
TensorCoord at_(int n, int z, int p, int q, int t, int r, int s) const {
if (problem_size_.mode == Mode::kConvolution) {
t = problem_size_.T - 1 - t;
r = problem_size_.R - 1 - r;
s = problem_size_.S - 1 - s;
}
int d = z * problem_size_.stride_d - problem_size_.pad_d + t * problem_size_.dilation_d;
int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h;
int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w;
return TensorCoord(n, d, h, w, filter_c_);
}
/// Adds a pointer offset in units of bytes
CUTLASS_HOST_DEVICE
void add_byte_offset_(LongIndex byte_offset) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] += byte_offset;
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask_(bool clear) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
// We are using inline PTX assembly here to avoid a CUDA C++ compilation
// artifact in which control flow instructions are generated. Instead, our
// intent is to predicate the mov instructions.
#if defined(__CUDA_ARCH__)
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][0])
:
"r"((int)clear),
"r"(masks_[s][0])
);
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][1])
:
"r"((int)clear),
"r"(masks_[s][1])
);
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][2])
:
"r"((int)clear),
"r"(masks_[s][2])
);
#else
if (clear) {
masks_[s][0] = 0;
masks_[s][1] = 0;
masks_[s][2] = 0;
}
#endif
}
}
public:
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
add_byte_offset_(pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_HOST_DEVICE
void advance() {
// Moves to the next tile. next_idx selects which precomputed pointer increment
// (params_.inc_next) to apply: 0 advances S, 1 advances R (S wrapped),
// 2 advances T (R and S wrapped), 3 advances C (T, R and S all wrapped).
int next_idx = 0;
++filter_s_;
if (filter_s_ == problem_size_.S) {
filter_s_ = 0;
++filter_r_;
next_idx = 1;
if (filter_r_ == problem_size_.R) {
filter_r_ = 0;
++filter_t_;
if (filter_t_ < problem_size_.T) {
next_idx = 2;
}
else {
filter_t_ = 0;
next_idx = 3;
}
}
}
add_byte_offset_(params_.inc_next[next_idx]);
if (next_idx == 3) {
filter_c_ += params_.filter_c_delta;
}
clear_mask_(filter_c_ >= problem_size_.C);
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask() {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
masks_[s][0] = Mask(0);
masks_[s][1] = Mask(0);
masks_[s][2] = Mask(0);
}
}
CUTLASS_HOST_DEVICE
bool valid() {
return
(masks_[iteration_strided_][0] & (Index(1) << filter_t_)) &&
(masks_[iteration_strided_][1] & (Index(1) << filter_r_)) &&
(masks_[iteration_strided_][2] & (Index(1) << filter_s_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_[iteration_strided_]);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dFpropActivationTileAccessIteratorOptimized &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
// Conv3dFpropActivationTileAccessIteratorOptimized has a constraint on filter positions
// due to the number of mask bits.
if (problem_size.T > 32 || problem_size.R > 32 || problem_size.S > 32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
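// Illustrative host-side usage (a sketch, not part of this header): callers are
// expected to gate on can_implement() before selecting this optimized iterator,
// e.g.
//
//   if (IteratorA::can_implement(problem_size) != cutlass::Status::kSuccess) {
//     // fall back to the analytic iterator or reject the problem
//   }
//
// where IteratorA is a hypothetical alias for an instantiation of this class.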
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_optimized.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_optimized.h",
"repo_id": "include",
"token_count": 6148
} | 20 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a multistage threadblock-scoped fused activation's
scale+bias+relu and Implicit GEMM Convolution kernel.
The original implicit GEMM stores out-of-bound data as zeroes in shared
memory: zeroes fed into the tensor cores produce zeroes out of the tensor
cores, so the result is unchanged. When scale+bias+relu is fused into the
mainloop, this is no longer true because
0 x scale + bias = bias
which is not always 0. So, instead of storing zeroes, this fused kernel
stores out-of-bound data as a special NaN (0x7eff). When applying
scale+bias+relu, the code behaves like
if (data == 0x7eff)
data = 0;
else
data = scale+bias+relu(data, scale, bias);
See include/cutlass/conv/warp/scale_bias_relu_transform.h for the
elementwise computation. See include/cutlass/arch/memory_sm80.h for the NaN fill.
*/
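//
// Conceptual sketch of the fused elementwise step described above (illustrative
// only; the production code in conv/warp/scale_bias_relu_transform.h operates on
// register fragments, not scalars, and the exact math lives there):
//
//   CUTLASS_DEVICE
//   half_t scale_bias_relu(half_t data, half_t scale, half_t bias) {
//     // 0x7eff is the NaN sentinel written by cp_async_nan for out-of-bound loads
//     if (data.raw() == uint16_t(0x7eff)) {
//       return half_t(0);
//     }
//     half_t v = data * scale + bias;
//     return v > half_t(0) ? v : half_t(0);
//   }
//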
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/cache_operation.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/scale_bias_tile_iterator.h"
#include "cutlass/conv/warp/scale_bias_relu_transform.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Element type of scale and bias vectors
typename ElementScaleBias_,
/// Layout of scale and bias vectors
typename LayoutScaleBias_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// WarpIterator to load Scale or Bias vector from the shared memory
typename WarpIteratorScaleBias_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class MmaFpropFusionBase {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Element type of scale and bias vectors
using ElementScaleBias = ElementScaleBias_;
/// Layout of scale and bias vectors
using LayoutScaleBias = LayoutScaleBias_;
///< Policy describing tuning details
using Policy = Policy_;
///< WarpIterator to load Scale or Bias vector from the shared memory
using WarpIteratorScaleBias = WarpIteratorScaleBias_;
//
// Dependent types
//
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Shape describing the overall GEMM computed from shared memory
/// by each warp.
using WarpGemm = typename Policy::Operator::Shape;
/// Shape describing the number of warps filling the CTA
using WarpCount = cutlass::gemm::GemmShape<Shape::kM / WarpGemm::kM,
Shape::kN / WarpGemm::kN,
Shape::kK / WarpGemm::kK>;
/// Number of warp-level GEMM operations
static int const kWarpGemmIterations =
(WarpGemm::kK / Operator::Policy::MmaShape::kK);
/// Number of stages
static int const kStages = Stages;
/// Tensor reference to the A operand
using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>;
/// Tensor reference to the scale and bias vectors
using TensorRefScaleBias = TensorRef<ElementScaleBias, LayoutScaleBias>;
/// Tensor reference to the B operand
using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;
static_assert(kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
static_assert((kWarpGemmIterations % 2) == 0,
"Inner loop iteration must be an even number.");
//
// Nested structs
//
/// Shared storage object needed by threadblock-scoped GEMM
class SharedStorage {
public:
//
// Type definitions
//
/// Shape of the A matrix operand in shared memory
using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow,
Shape::kK * kStages +
Policy::SmemPaddingA::kColumn>;
/// Shape of the A scale and bias vectors in shared memory
using ShapeScaleBias =
MatrixShape<1 + Policy::SmemPaddingA::kRow,
2 * Shape::kK * kStages + Policy::SmemPaddingA::kColumn>;
/// Shape of the B matrix operand in shared memory
using ShapeB =
MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow,
Shape::kN + Policy::SmemPaddingB::kColumn>;
public:
//
// Data members
//
/// Buffer for A operand
AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A;
/// Buffer for B operand
AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;
/// Buffer for A operand Scale and Bias
AlignedBuffer<ElementScaleBias, ShapeScaleBias::kCount> operand_A_scale_bias;
public:
//
// Methods
//
/// Returns a layout object for the A matrix
CUTLASS_DEVICE
static typename Operator::LayoutA LayoutA() {
return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});
}
/// Returns a layout object for the B matrix
CUTLASS_HOST_DEVICE
static typename Operator::LayoutB LayoutB() {
return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});
}
/// Returns a layout object for the A scale and bias vectors
CUTLASS_DEVICE
static LayoutScaleBias LayoutScaleBias() {
return LayoutScaleBias::packed(
{ShapeScaleBias::kRow, ShapeScaleBias::kColumn});
}
/// Returns a TensorRef to the A operand
CUTLASS_HOST_DEVICE
TensorRefA operand_A_ref() {
return TensorRefA{operand_A.data(), LayoutA()};
}
/// Returns a TensorRef to the B operand
CUTLASS_HOST_DEVICE
TensorRefB operand_B_ref() {
return TensorRefB{operand_B.data(), LayoutB()};
}
/// Returns a TensorRef to the A operand Scale vector
CUTLASS_HOST_DEVICE
TensorRefScaleBias operand_A_scale_bias_ref() {
return TensorRefScaleBias{operand_A_scale_bias.data(), LayoutScaleBias()};
}
};
protected:
//
// Data members
//
/// Iterator to load a warp-scoped tile of A operand from shared memory
typename Operator::IteratorA warp_tile_iterator_A_;
/// Iterator to load a warp-scoped tile of A operand scale and bias vector
/// from shared memory
WarpIteratorScaleBias warp_tile_iterator_A_scale_bias_;
/// Iterator to load a warp-scoped tile of B operand from shared memory
typename Operator::IteratorB warp_tile_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaFpropFusionBase(
///< Shared storage needed for internal use by threadblock-scoped GEMM
SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx)
: warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx),
warp_tile_iterator_A_scale_bias_(
shared_storage.operand_A_scale_bias_ref(), lane_idx),
warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Iterates over vectors of scale and bias vector in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorScaleBias_,
/// Iterates over vectors of scale and bias vector in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorScaleBias_,
/// Cache operation for scale/bias operand
cutlass::arch::CacheOperation::Kind CacheOpScaleBias,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// WarpIterator to load Scale or Bias vector from the shared memory
typename WarpIteratorScaleBias_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class ImplicitGemmFpropFusionMultistage
: public MmaFpropFusionBase<Shape_, typename IteratorScaleBias_::Element,
typename IteratorScaleBias_::Layout, Policy_,
WarpIteratorScaleBias_, Stages> {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Iterates over tiles of the scale and bias vectors in global memory
using IteratorScaleBias = IteratorScaleBias_;
///< WarpIterator to load Scale or Bias vector from the shared memory
using WarpIteratorScaleBias = WarpIteratorScaleBias_;
///< Policy describing tuning details
using Policy = Policy_;
///< Base class
using Base = MmaFpropFusionBase<Shape_, typename IteratorScaleBias::Element,
typename IteratorScaleBias::Layout, Policy,
WarpIteratorScaleBias, Stages>;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
using SmemIteratorScaleBias = SmemIteratorScaleBias_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static cutlass::arch::CacheOperation::Kind const kCacheOpScaleBias =
CacheOpScaleBias;
//
// Dependent types
//
/// Fragment of accumulator tile
using ElementC = typename Policy::Operator::ElementC;
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Internal structure exposed for introspection.
struct Detail {
static_assert(Base::kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
/// Number of cp.async instructions to load one stage of operand A
static int const AsyncCopyIterationsPerStageA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const AsyncCopyIterationsPerStageB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
/// Number of cp.async instructions to load one group of operand A
static int const kAccessesPerGroupA =
(AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
/// Number of cp.async instructions to load one group of operand B
static int const kAccessesPerGroupB =
(AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
};
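// Worked example (hypothetical shapes, for illustration only): if
// AsyncCopyIterationsPerStageA == 8 and Base::kWarpGemmIterations == 4, then
// kAccessesPerGroupA == (8 + 4 - 1) / 4 == 2, i.e. each of the 4 warp-level MMA
// iterations issues 2 of the 8 cp.async copies needed to stage the next tile of A.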
private:
using WarpLoadedFragmentA = typename Operator::FragmentA;
using WarpLoadedFragmentB = typename Operator::FragmentB;
using WarpLoadedFragmentScaleBias =
typename WarpIteratorScaleBias::Fragment;
using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of A operand scale vector to shared memory
SmemIteratorScaleBias smem_iterator_A_scale_bias_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
ImplicitGemmFpropFusionMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx)
: Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_A_scale_bias_(shared_storage.operand_A_scale_bias_ref(),
thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_A_scale_bias_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
CUTLASS_DEVICE
void copy_tiles_and_advance(IteratorA &iterator_A,
IteratorScaleBias &iterator_A_scale_bias,
IteratorB &iterator_B, int group_start_A = 0,
int group_start_B = 0) {
iterator_A.set_iteration_index(group_start_A);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess / 8;
// Uses NaN fill for out-of-bound data
cutlass::arch::cp_async_nan<kSrcBytes, kCacheOpA>(
dst_ptr, iterator_A.get(), iterator_A.valid());
++iterator_A;
++this->smem_iterator_A_;
}
}
// Async Copy for operand A scale and bias vector. Scale and bias vectors
// are small. One iteration is enough.
if (group_start_A == 0) {
typename IteratorScaleBias::AccessType *dst_ptr =
reinterpret_cast<typename IteratorScaleBias::AccessType *>(
this->smem_iterator_A_scale_bias_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorScaleBias::Element>::value *
IteratorScaleBias::kElementsPerAccess / 8;
cutlass::arch::cp_async<kSrcBytes, kCacheOpScaleBias>(
dst_ptr, iterator_A_scale_bias.get(), iterator_A_scale_bias.valid());
}
iterator_B.set_iteration_index(group_start_B);
this->smem_iterator_B_.set_iteration_index(group_start_B);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr, iterator_B.get(), iterator_B.valid());
++iterator_B;
++this->smem_iterator_B_;
}
}
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A,
///< iterator over B operand in global memory
IteratorB iterator_B,
///< iterator over scale and bias vectors in global memory
IteratorScaleBias iterator_A_scale_bias,
///< initial value of accumulator
FragmentC const &src_accum,
///< number of iterations per channel
int gemm_k_iterations_per_channel = 0,
///< Imaginary strides used for planar-complex only - ignored here
int64_t imag_stride_A = 0,
int64_t imag_stride_B = 0) {
//
// Prologue
//
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations) {
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess / 8;
// Uses NaN fill for out-of-bound data
cutlass::arch::cp_async_nan<kSrcBytes, kCacheOpA>(
dst_ptr, iterator_A.get(), iterator_A.valid());
++iterator_A;
++this->smem_iterator_A_;
}
// Async Copy for operand A scale and bias vectors. Scale and bias
// vectors are small. One iteration is enough.
{
typename IteratorScaleBias::AccessType *dst_ptr =
reinterpret_cast<typename IteratorScaleBias::AccessType *>(
this->smem_iterator_A_scale_bias_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorScaleBias::Element>::value *
IteratorScaleBias::kElementsPerAccess / 8;
cutlass::arch::cp_async<kSrcBytes, kCacheOpScaleBias>(
dst_ptr, iterator_A_scale_bias.get(), iterator_A_scale_bias.valid());
}
iterator_B.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr, iterator_B.get(), iterator_B.valid());
++iterator_B;
++this->smem_iterator_B_;
}
// Move to the next stage
iterator_A.advance();
iterator_A_scale_bias.advance();
iterator_B.advance();
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_A_scale_bias_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Inserts a fence to group cp.async instructions into stages.
cutlass::arch::cp_async_fence();
}
// Perform accumulation in the 'd' output operand
accum = src_accum;
// Waits until kStages-2 stages have committed.
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA warp_loaded_frag_A[2];
WarpLoadedFragmentB warp_loaded_frag_B[2];
WarpLoadedFragmentScaleBias warp_loaded_frag_A_scale_bias[2];
WarpTransformedFragmentA warp_transformed_frag_A[2];
WarpTransformedFragmentB warp_transformed_frag_B[2];
Operator warp_mma;
cutlass::conv::warp::FpropScaleBiasReluTransform<WarpTransformedFragmentA,
WarpLoadedFragmentScaleBias>
elementwise_transform;
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_A_scale_bias_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]);
this->warp_tile_iterator_A_scale_bias_.load(
warp_loaded_frag_A_scale_bias[0]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_A_scale_bias_;
++this->warp_tile_iterator_B_;
// Start issuing the first group of the next stage outside of the mainloop
copy_tiles_and_advance(iterator_A, iterator_A_scale_bias, iterator_B);
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0],
warp_loaded_frag_A[0], warp_loaded_frag_B[0]);
elementwise_transform(warp_transformed_frag_A[0],
warp_loaded_frag_A_scale_bias[0]);
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations;
++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping back to the start of
// the k dimension when this is the last group of the stage.
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_scale_bias_.set_kgroup_index(
(warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_A_scale_bias_.load(
warp_loaded_frag_A_scale_bias[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_A_scale_bias_;
++this->warp_tile_iterator_B_;
if (warp_mma_k > 0) {
warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
warp_loaded_frag_A[warp_mma_k % 2],
warp_loaded_frag_B[warp_mma_k % 2]);
elementwise_transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_loaded_frag_A_scale_bias[warp_mma_k % 2]);
}
warp_mma(
accum,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
accum
);
// Issue global->shared copies for the next stage
int group_start_iteration_A, group_start_iteration_B;
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
group_start_iteration_A = 0;
group_start_iteration_B = 0;
} else {
group_start_iteration_A =
(warp_mma_k + 1) * Detail::kAccessesPerGroupA;
group_start_iteration_B =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB;
}
copy_tiles_and_advance(iterator_A, iterator_A_scale_bias, iterator_B,
group_start_iteration_A,
group_start_iteration_B);
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_transformed_frag_B[(warp_mma_k + 1) % 2],
warp_loaded_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
elementwise_transform(
warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_A_scale_bias[(warp_mma_k + 1) % 2]);
}
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
// Inserts a fence to group cp.async instructions into stages.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages of cp.async have committed
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A.advance();
iterator_A_scale_bias.advance();
iterator_B.advance();
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_A_scale_bias_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_A_scale_bias_.add_tile_offset(
{0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_A_scale_bias_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations;
}
}
}
// Insert fence and wait for all outstanding cp.async operations to commit.
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/implicit_gemm_fprop_fusion_multistage.h/0 | {
"file_path": "include/cutlass/conv/threadblock/implicit_gemm_fprop_fusion_multistage.h",
"repo_id": "include",
"token_count": 12230
} | 21 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Helper macros for the CUTLASS library
*/
#pragma once
////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef CUTLASS_NAMESPACE
#define concat_tok(a, b) a ## b
#define mkcutlassnamespace(pre, ns) concat_tok(pre, ns)
#define cutlass mkcutlassnamespace(cutlass_, CUTLASS_NAMESPACE)
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__NVCC__) || (defined(__clang__) && defined(__CUDA__))
#define CUTLASS_HOST_DEVICE __forceinline__ __device__ __host__
#define CUTLASS_DEVICE __forceinline__ __device__
#elif defined(__CUDACC_RTC__)
#define CUTLASS_HOST_DEVICE __forceinline__ __device__
#define CUTLASS_DEVICE __forceinline__ __device__
#else
#define CUTLASS_HOST_DEVICE inline
#define CUTLASS_DEVICE inline
#endif
#define CUTLASS_HOST __host__
#define CUTLASS_GLOBAL __global__ static
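// Illustrative usage (not part of this header): functions shared by host and
// device code are annotated with these macros, e.g.
//
//   CUTLASS_HOST_DEVICE
//   constexpr int round_up(int x, int div) { return ((x + div - 1) / div) * div; }
//
// which expands to `__forceinline__ __device__ __host__` under NVCC or Clang CUDA
// and to plain `inline` in host-only translation units.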
////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename T>
CUTLASS_HOST_DEVICE void __CUTLASS_UNUSED(T const &)
{ }
#if defined(__GNUC__)
#define CUTLASS_UNUSED(expr) __CUTLASS_UNUSED(expr)
#else
#define CUTLASS_UNUSED(expr) do { ; } while (&expr != &expr)
#endif
#ifdef _MSC_VER
// Provides support for alternative operators 'and', 'or', and 'not'
#include <iso646.h>
#endif // _MSC_VER
#if !defined(__CUDACC_RTC__)
#include <assert.h>
#endif
#if defined(__CUDA_ARCH__)
#if defined(_MSC_VER)
#define CUTLASS_NOT_IMPLEMENTED() { printf("%s not implemented\n", __FUNCSIG__); asm volatile ("brkpt;\n"); }
#else
#define CUTLASS_NOT_IMPLEMENTED() { printf("%s not implemented\n", __PRETTY_FUNCTION__); asm volatile ("brkpt;\n"); }
#endif
#else
#if defined(_MSC_VER)
#define CUTLASS_NOT_IMPLEMENTED() assert(0 && __FUNCSIG__)
#else
#define CUTLASS_NOT_IMPLEMENTED() assert(0 && __PRETTY_FUNCTION__)
#endif
#endif
// CUTLASS_CMATH_NAMESPACE is the namespace where code can find
// <cmath> functions like isnan and log. Such functions are in
// the std namespace in host code, but in the global namespace
// in device code.
//
// The intended use case for this macro is in "using" declarations
// for making argument-dependent lookup (ADL) work in generic code.
// For example, if T is cutlass::half_t, the following code will
// invoke cutlass::isnan(half_t). If T is float, it will invoke
// std::isnan on host and ::isnan on device. (CUTLASS's support
// for NVRTC prevents it from using things in the std namespace
// in device code.) Correct use of "using" declarations can help
// avoid unexpected implicit conversions, like from half_t to float.
//
// template<class T>
// bool foo(T x) {
// using CUTLASS_CMATH_NAMESPACE :: isnan;
// return isnan(x);
// }
//
// Without this macro, one would need to write the following.
//
// template<class T>
// bool foo(T x) {
// #if defined(__CUDA_ARCH__)
// using ::isnan;
// #else
// using std::isnan;
// #endif
// return isnan(x);
// }
#if defined(__CUDA_ARCH__)
# define CUTLASS_CMATH_NAMESPACE
#else
# define CUTLASS_CMATH_NAMESPACE std
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
#ifndef CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED
#define CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED 0
#endif
// CUDA 10.1 introduces the mma instruction
#if !defined(CUTLASS_ENABLE_TENSOR_CORE_MMA)
#define CUTLASS_ENABLE_TENSOR_CORE_MMA 0
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#define CUTLASS_ASSERT(x) assert(x)
////////////////////////////////////////////////////////////////////////////////////////////////////
// CUTLASS_PRAGMA_(UNROLL|NO_UNROLL) optimization directives for the CUDA compiler.
#if defined(__CUDA_ARCH__) && !defined(__INTELLISENSE__)
#if defined(__CUDACC_RTC__) || (defined(__clang__) && defined(__CUDA__))
#define CUTLASS_PRAGMA_UNROLL _Pragma("unroll")
#define CUTLASS_PRAGMA_NO_UNROLL _Pragma("unroll 1")
#else
#define CUTLASS_PRAGMA_UNROLL #pragma unroll
#define CUTLASS_PRAGMA_NO_UNROLL #pragma unroll 1
#endif
#define CUTLASS_GEMM_LOOP CUTLASS_PRAGMA_NO_UNROLL
#else
#define CUTLASS_PRAGMA_UNROLL
#define CUTLASS_PRAGMA_NO_UNROLL
#define CUTLASS_GEMM_LOOP
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if !defined(__CUDACC_RTC__)
#define CUTLASS_THREAD_LOCAL thread_local
#else
#define CUTLASS_THREAD_LOCAL
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(_MSVC_LANG)
# define CUTLASS_CPLUSPLUS _MSVC_LANG
#else
# define CUTLASS_CPLUSPLUS __cplusplus
#endif
#if (201700L <= CUTLASS_CPLUSPLUS)
#define CUTLASS_CONSTEXPR_IF_CXX17 constexpr
#define CUTLASS_CXX17_OR_LATER 1
#else
#define CUTLASS_CONSTEXPR_IF_CXX17
#define CUTLASS_CXX17_OR_LATER 0
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
}; // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/detail/helper_macros.hpp/0 | {
"file_path": "include/cutlass/detail/helper_macros.hpp",
"repo_id": "include",
"token_count": 2249
} | 22 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/scale_type.h"
//////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue {
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
//
// Builder Epilogue Schedules
//
//////////////////////////////////////////////////////////////////////////////
struct PtrArrayDefault {};
struct NoSmemWarpSpecialized {};
struct PtrArrayNoSmemWarpSpecialized {};
struct PtrArrayPlanarComplexNoSmemWarpSpecialized {};
struct TmaWarpSpecialized {};
struct TmaWarpSpecializedCooperative {};
struct PtrArrayTmaWarpSpecializedCooperative {};
// DEPRECATED schedules, will be removed in next release
struct TmaWarpSpecializedElementwiseBase : public TmaWarpSpecialized {};
struct TmaWarpSpecializedCooperativeElementwiseBase : public TmaWarpSpecializedCooperative {};
template <
template <class T> class ActivationFunctor_,
thread::ScaleType::Kind Scale_ = thread::ScaleType::Default,
FloatRoundStyle Round_ = FloatRoundStyle::round_to_nearest
>
struct [[deprecated("Use TmaWarpSpecialized with fusion::LinCombEltAct instead")]]
TmaWarpSpecializedElementwise : public TmaWarpSpecializedElementwiseBase {
template <class T>
using ActivationFunctor = ActivationFunctor_<T>;
static constexpr thread::ScaleType::Kind Scale = Scale_;
static constexpr FloatRoundStyle Round = Round_;
};
template <
template <class T> class ActivationFunctor_,
thread::ScaleType::Kind Scale_ = thread::ScaleType::Default,
FloatRoundStyle Round_ = FloatRoundStyle::round_to_nearest
>
struct [[deprecated("Use TmaWarpSpecializedCooperative with fusion::LinCombEltAct instead")]]
TmaWarpSpecializedCooperativeElementwise : public TmaWarpSpecializedCooperativeElementwiseBase {
template <class T>
using ActivationFunctor = ActivationFunctor_<T>;
static constexpr thread::ScaleType::Kind Scale = Scale_;
static constexpr FloatRoundStyle Round = Round_;
};
struct TmaWarpSpecializedBiasElementwiseBase : public TmaWarpSpecialized{};
struct TmaWarpSpecializedCooperativeBiasElementwiseBase : public TmaWarpSpecializedCooperative {};
template <
template <class T> class ActivationFunctor_,
class ElementT_,
template <class T> class BiasOp_,
bool StoreT_,
class ElementBias_
>
struct [[deprecated("Use TmaWarpSpecialized with fusion::LinCombPerRowBiasEltActAux instead")]]
TmaWarpSpecializedBiasElementwise : public TmaWarpSpecializedBiasElementwiseBase {
template <class T>
using ActivationFunctor = ActivationFunctor_<T>;
using ElementT = ElementT_;
template <class T>
using BiasOp = BiasOp_<T>;
static constexpr bool StoreT = StoreT_;
using ElementBias = ElementBias_;
};
template <
template <class T> class ActivationFunctor_,
class ElementT_,
template <class T> class BiasOp_,
bool StoreT_,
class ElementBias_
>
struct [[deprecated("Use TmaWarpSpecializedCooperative with fusion::LinCombPerRowBiasEltActAux instead")]]
TmaWarpSpecializedCooperativeBiasElementwise : public TmaWarpSpecializedCooperativeBiasElementwiseBase {
template <class T>
using ActivationFunctor = ActivationFunctor_<T>;
using ElementT = ElementT_;
template <class T>
using BiasOp = BiasOp_<T>;
static constexpr bool StoreT = StoreT_;
using ElementBias = ElementBias_;
};
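// Migration sketch for the deprecated schedules above (illustrative only; the
// exact fusion template arguments depend on the element types in use, and
// ElementD / ElementCompute below are placeholders):
//
//   // before (deprecated)
//   using Schedule = TmaWarpSpecializedElementwise<cutlass::epilogue::thread::ReLu>;
//
//   // after: use the plain schedule and express the elementwise op through the
//   // fusion operation passed to the collective epilogue builder
//   using Schedule = TmaWarpSpecialized;
//   using FusionOp = cutlass::epilogue::fusion::LinCombEltAct<
//       cutlass::epilogue::thread::ReLu, ElementD, ElementCompute>;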
//////////////////////////////////////////////////////////////////////////////
//
// Collective Dispatch Policies
//
//////////////////////////////////////////////////////////////////////////////
template<
int StagesC_,
int StagesD_,
int FragmentSize_,
bool ReuseSmemC_,
bool DelayTmaStore_
>
struct Sm90TmaWarpSpecialized {
constexpr static int StagesC = StagesC_;
constexpr static int StagesD = StagesD_;
constexpr static int FragmentSize = FragmentSize_;
constexpr static bool ReuseSmemC = ReuseSmemC_;
constexpr static bool DelayTmaStore = DelayTmaStore_;
};
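// Example instantiation (hypothetical stage and fragment counts, shown only to
// document the parameter order):
//
//   using EpilogueSchedule = Sm90TmaWarpSpecialized<
//       /* StagesC_        = */ 4,
//       /* StagesD_        = */ 2,
//       /* FragmentSize_   = */ 4,
//       /* ReuseSmemC_     = */ true,
//       /* DelayTmaStore_  = */ false>;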
template<
int StagesC_,
int StagesD_,
int FragmentSize_,
bool ReuseSmemC_,
bool DelayTmaStore_
>
struct Sm90PtrArrayTmaWarpSpecialized {
constexpr static int StagesC = StagesC_;
constexpr static int StagesD = StagesD_;
constexpr static int FragmentSize = FragmentSize_;
constexpr static bool ReuseSmemC = ReuseSmemC_;
constexpr static bool DelayTmaStore = DelayTmaStore_;
};
// DEPRECATED policies, will be removed in next release
template<
int StagesC_,
int StagesD_,
int FragmentSize_ = 2
>
struct Sm90TmaWarpSpecializedBiasElementwise {
constexpr static int StagesC = StagesC_;
constexpr static int StagesD = StagesD_;
constexpr static int FragmentSize = FragmentSize_;
};
//////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::epilogue
| include/cutlass/epilogue/dispatch_policy.hpp/0 | {
"file_path": "include/cutlass/epilogue/dispatch_policy.hpp",
"repo_id": "include",
"token_count": 1887
} | 23 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing elementwise operations used by epilogues.
*/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/epilogue/threadblock/epilogue_base.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
namespace detail {
struct EVT2xBase { };
template <class T>
static constexpr bool is_2x_evt_v = platform::is_base_of<EVT2xBase, T>::value;
} // namespace detail
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator
template <
typename DefaultEpilogue, ///< Default Epilogue Descriptor
typename FusionCallbacks_, ///< The called fusion callbacks
int Stages = 2, ///< Software pipeline stages for epilogue
int IterationsUnroll = true ///< Used to reduce binary size when epilogue op is large
>
class EpilogueWithVisitorCallbacks :
public EpilogueBase<
typename DefaultEpilogue::Shape,
typename DefaultEpilogue::WarpMmaOperator::Shape,
DefaultEpilogue::kPartitionsK,
typename DefaultEpilogue::AccumulatorFragmentIterator,
typename DefaultEpilogue::WarpTileIterator,
typename DefaultEpilogue::Padding,
DefaultEpilogue::kFragmentsPerIteration>,
public EpilogueBaseStreamK<
typename DefaultEpilogue::Shape,
DefaultEpilogue::kPartitionsK,
typename DefaultEpilogue::WarpMmaOperator,
typename DefaultEpilogue::AccumulatorFragmentIterator>,
public detail::EVT2xBase
{
public:
static_assert(Stages <= 2, "Sm80 EVT only supports up to 2 stages.");
// Whether the epilogue is pipelined
static bool constexpr Pipelined = Stages > 1;
using FusionCallbacks = FusionCallbacks_;
using OutputTileIterator = typename DefaultEpilogue::OutputTileIterator;
// Number of epilogue iterations.
// Each iteration processes a 8xThreadblockTile::kN output tile
static const int kIterations = OutputTileIterator::kIterations;
using Base = EpilogueBase<
typename DefaultEpilogue::Shape,
typename DefaultEpilogue::WarpMmaOperator::Shape,
DefaultEpilogue::kPartitionsK,
typename DefaultEpilogue::AccumulatorFragmentIterator,
typename DefaultEpilogue::WarpTileIterator,
typename DefaultEpilogue::Padding,
DefaultEpilogue::kFragmentsPerIteration>;
using BaseStreamK = EpilogueBaseStreamK<
typename DefaultEpilogue::Shape,
DefaultEpilogue::kPartitionsK,
typename DefaultEpilogue::WarpMmaOperator,
typename DefaultEpilogue::AccumulatorFragmentIterator>;
static int const kPartitionsK = DefaultEpilogue::kPartitionsK;
using AccumulatorFragmentIterator = typename DefaultEpilogue::AccumulatorFragmentIterator;
using WarpTileIterator = typename DefaultEpilogue::WarpTileIterator;
using SharedLoadIterator = typename DefaultEpilogue::SharedLoadIterator;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename Base::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
struct OutputOp{
using ElementAccumulator = ElementAccumulator;
using Params = typename FusionCallbacks::Arguments;
};
/// Fragment type used by the accumulator tile's fragment iterator
using AccumulatorFragment = typename AccumulatorFragmentIterator::Fragment;
// Output access size
static int const kElementsPerAccess = DefaultEpilogue::kElementsPerAccess;
/// Array type used by output functor
using AccumulatorAccessType = Array<
typename WarpTileIterator::Element, kElementsPerAccess>;
static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles;
using Params = typename FusionCallbacks::Params;
static size_t constexpr kSmemStageOffset = sizeof(Base::SharedStorage) / sizeof(ElementAccumulator);
static int constexpr kAccumulatorFragmentCount = AccumulatorTile::kElements / (kIterations * AccumulatorAccessType::kElements) / kPartitionsK;
struct SharedStorage {
typename Base::SharedStorage acc_smem[Stages];
typename FusionCallbacks::SharedStorage callback_smem;
};
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
FusionCallbacks fusion_callbacks;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueWithVisitorCallbacks(
const Params ¶ms_callbacks, ///< Epilogue Visitor params
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
Base(shared_storage.acc_smem[0], thread_idx, warp_idx, lane_idx),
BaseStreamK(thread_idx),
shared_load_iterator_(shared_storage.acc_smem[0].reference(), thread_idx),
fusion_callbacks(params_callbacks, shared_storage.callback_smem)
{ }
/// Aggregates the accumulator sets shared by peer blocks in the global workspace,
/// performing epilogue computations, writing to output
template <class ProblemShape>
CUTLASS_DEVICE
void reduce(
int peer_idx_begin,
int peer_idx_end,
int reduce_fragment_idx,
void *element_workspace,
cutlass::gemm::GemmCoord threadblock_tile_offset,
ProblemShape problem_shape,
int thread_idx)
{
auto callbacks = fusion_callbacks.get_callbacks(
threadblock_tile_offset,
thread_idx,
problem_shape
);
callbacks.begin_epilogue();
// Reduce peer accumulator fragments into one fragment
AccumulatorFragment accum_fragment;
BaseStreamK::reduce(accum_fragment, peer_idx_begin, peer_idx_end, reduce_fragment_idx, element_workspace);
// Store fragment to shared memory
this->warp_tile_iterator_.store(accum_fragment);
__syncthreads();
callbacks.begin_step(reduce_fragment_idx);
// Load fragment from shared memory
typename SharedLoadIterator::Fragment aligned_accum_fragment;
shared_load_iterator_.load(aligned_accum_fragment);
// Add fragments shared by other k partitions
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
typename SharedLoadIterator::Fragment aligned_addend_fragment;
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_addend_fragment);
aligned_accum_fragment = add_fragments(aligned_accum_fragment, aligned_addend_fragment);
}
}
//
// Iterate over output fragment
//
AccumulatorAccessType const *accum_frag_ptr =
reinterpret_cast<AccumulatorAccessType const*>(&aligned_accum_fragment);
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < kAccumulatorFragmentCount; ++idx) {
int row_idx = idx / SharedLoadIterator::ThreadMap::Iterations::kColumn;
int col_idx = idx % SharedLoadIterator::ThreadMap::Iterations::kColumn;
// Start a new row of the output fragment
if (!col_idx) {
callbacks.begin_row(row_idx);
}
callbacks.visit(
reduce_fragment_idx,
row_idx,
col_idx,
idx,
accum_frag_ptr[idx]
);
// End the row of the output fragment
if (col_idx + 1 == SharedLoadIterator::ThreadMap::Iterations::kColumn) {
callbacks.end_row(row_idx);
}
}
callbacks.end_step(reduce_fragment_idx);
callbacks.end_epilogue();
}
/// Streams the result to global memory
template <class ProblemShape>
CUTLASS_DEVICE
void operator()(
AccumulatorTile const &accumulators,
cutlass::gemm::GemmCoord threadblock_tile_offset,
ProblemShape problem_shape,
int thread_idx
) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles)
auto callbacks = fusion_callbacks.get_callbacks(
threadblock_tile_offset,
thread_idx,
problem_shape
);
callbacks.begin_epilogue();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
if constexpr(Pipelined){
__syncthreads();
//
// Pipeline Prologue
//
size_t warp_iterator_offset = kSmemStageOffset;
size_t smem_iterator_offset = kSmemStageOffset;
callbacks.begin_step(0);
acc2smem_source_needed<cutlass::make_index_sequence<kIterations>>::push(
0, accum_fragment_iterator, this->warp_tile_iterator_);
this->warp_tile_iterator_.add_pointer_offset(warp_iterator_offset);
warp_iterator_offset = -warp_iterator_offset;
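// Negating the offset each step ping-pongs the warp tile iterator between the two smem
// stages, so accumulator stores for the next step can overlap with loads for the current one.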
//
// Pipeline Loop
//
#pragma unroll(IterationsUnroll ? kIterations : 1)
for (int iter_idx = 1; iter_idx < kIterations + 1; ++iter_idx) {
__syncthreads();
// Skip the load for epilogue
if (iter_idx < kIterations) {
callbacks.begin_step(iter_idx);
acc2smem_source_needed<cutlass::make_index_sequence<kIterations>>::push(
iter_idx, accum_fragment_iterator, this->warp_tile_iterator_);
this->warp_tile_iterator_.add_pointer_offset(warp_iterator_offset);
warp_iterator_offset = -warp_iterator_offset;
}
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
if (kPartitionsK > 1) {
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
}
shared_load_iterator_.add_pointer_offset(smem_iterator_offset);
smem_iterator_offset = -smem_iterator_offset;
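// The shared-memory load iterator alternates between the two stages in the same ping-pong fashion.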
//
// Iterate over output fragments
//
AccumulatorAccessType const *accum_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment);
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < kAccumulatorFragmentCount; ++idx) {
int row_idx = idx / SharedLoadIterator::ThreadMap::Iterations::kColumn;
int col_idx = idx % SharedLoadIterator::ThreadMap::Iterations::kColumn;
// Start a new row of the output fragment
if (!col_idx) {
callbacks.begin_row(row_idx);
}
callbacks.visit(
iter_idx-1,
row_idx,
col_idx,
idx,
accum_frag_ptr[idx]
);
// End the row of the output fragment
if (col_idx + 1 == SharedLoadIterator::ThreadMap::Iterations::kColumn) {
callbacks.end_row(row_idx);
}
}
//
// Conclude the step
//
callbacks.end_step(iter_idx-1);
}
} else {
#pragma unroll(IterationsUnroll ? kIterations : 1)
for (int iter_idx = 0; iter_idx < kIterations; ++iter_idx) {
//
// Load the source
//
callbacks.begin_step(iter_idx);
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_needed<cutlass::make_index_sequence<kIterations>>::push(
iter_idx, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
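// The two barriers bracket the store: the first ensures the previous iteration's loads have
// completed before smem is overwritten, the second makes the new tile visible to all warps.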
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
if (kPartitionsK > 1) {
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
}
//
// Iterate over output fragments
//
AccumulatorAccessType const *accum_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment[0]);
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < kAccumulatorFragmentCount; ++idx) {
int row_idx = idx / SharedLoadIterator::ThreadMap::Iterations::kColumn;
int col_idx = idx % SharedLoadIterator::ThreadMap::Iterations::kColumn;
// Start a new row of the output fragment
if (!col_idx) {
callbacks.begin_row(row_idx);
}
callbacks.visit(
iter_idx,
row_idx,
col_idx,
idx,
accum_frag_ptr[idx]
);
// End the row of the output fragment
if (col_idx + 1 == SharedLoadIterator::ThreadMap::Iterations::kColumn) {
callbacks.end_row(row_idx);
}
}
//
// Conclude the step
//
callbacks.end_step(iter_idx);
}
}
callbacks.end_epilogue();
}
private:
template<class Seq>
struct acc2smem_source_needed;
template <size_t... Seq>
struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> {
template<int Advance>
CUTLASS_DEVICE
static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
warp_tile_iterator.store(accum_fragment);
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
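// Only the array entry whose Seq matches pos invokes helper<Seq>; this emulates a switch
// over the compile-time index sequence. The array itself carries no data.
(void)dummy; // silence unused-variable warnings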
}
};
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/epilogue_with_visitor_callbacks.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/epilogue_with_visitor_callbacks.h",
"repo_id": "include",
"token_count": 6585
} | 24 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Parameter structures and descriptors for epilogue predicated tile iterators.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/conv2d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
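/// Runtime descriptor of the 5-D (column, row, group, cluster, tile) decomposition used by
/// epilogue output tile iterators. Mirrors the compile-time OutputTileShape template.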
struct OutputTileShapeDesc {
int column;
int row;
int group;
int cluster;
int tile;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
OutputTileShapeDesc(): column(0), row(0), group(0), cluster(0), tile(0) { }
/// Ctor
CUTLASS_HOST_DEVICE
OutputTileShapeDesc(
int column_,
int row_,
int group_,
int cluster_,
int tile_
):
column(column_),
row(row_),
group(group_),
cluster(cluster_),
tile(tile_) { }
/// Total number of points in the 5D space
CUTLASS_HOST_DEVICE
int count() const {
return column * row * group * cluster * tile;
}
#if 0
CUTLASS_HOST_DEVICE
void print() const {
printf("{%d, %d, %d, %d, %d}", column, row, group, cluster, tile);
}
#endif
};
/// Helper template to construct an OutputTileShapeDesc from a OutputTileShape template.
template <typename Shape>
CUTLASS_HOST_DEVICE
OutputTileShapeDesc make_OutputTileShapeDesc() {
return OutputTileShapeDesc(
Shape::kColumn,
Shape::kRow,
Shape::kGroup,
Shape::kCluster,
Shape::kTile
);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Thread map description
struct OutputTileThreadMapDesc {
int threads;
int elements_per_access;
OutputTileShapeDesc shape;
OutputTileShapeDesc iterations;
OutputTileShapeDesc delta;
OutputTileShapeDesc count;
//
// Methods
//
CUTLASS_HOST_DEVICE
OutputTileThreadMapDesc() { }
CUTLASS_HOST_DEVICE
OutputTileThreadMapDesc(
int threads_,
int elements_per_access_,
OutputTileShapeDesc shape_,
OutputTileShapeDesc iterations_,
OutputTileShapeDesc delta_,
OutputTileShapeDesc count_
):
threads(threads_),
elements_per_access(elements_per_access_),
shape(shape_),
iterations(iterations_),
delta(delta_),
count(count_)
{
}
};
/// Helper template to construct an OutputTileShapeDesc from a OutputTileThreadMap template.
template <typename ThreadMap>
CUTLASS_HOST_DEVICE
OutputTileThreadMapDesc make_OutputTileThreadMapDesc() {
return OutputTileThreadMapDesc(
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
make_OutputTileShapeDesc<typename ThreadMap::Shape>(),
make_OutputTileShapeDesc<typename ThreadMap::Iterations>(),
make_OutputTileShapeDesc<typename ThreadMap::Delta>(),
make_OutputTileShapeDesc<typename ThreadMap::Count>()
);
}
///////////////////////////////////////////////////////////////////////////////
//
// Parameters struct for PredicatedTileIterator
//
struct PredicatedTileIteratorParams {
using Index = int32_t;
using LongIndex = int64_t;
//
// Data members
//
LongIndex stride; ///< stride in bytes between rows
LongIndex increment_row; ///< increment quantity (in bytes) to advance when moving between rows
LongIndex increment_group; ///< increment quantity (in bytes) to advance when moving to the next group
LongIndex increment_cluster; ///< increment quantity (in bytes) to advance when moving to the next cluster
LongIndex advance_row; ///< amount to add to move to the next 'row' position
LongIndex advance_group; ///< amount to add to move to the next 'group' position
LongIndex advance_cluster; ///< amount to add to move to the next 'cluster' position
LongIndex advance_tile; ///< amount to add to move to the next 'tile'
//
// Methods
//
CUTLASS_HOST_DEVICE
Status initialize(LongIndex stride_, OutputTileThreadMapDesc thread_map) {
stride = stride_;
increment_row = stride * thread_map.delta.row;
increment_group = stride * thread_map.delta.group
- stride * thread_map.delta.row * (thread_map.iterations.row - 1);
increment_cluster = stride * thread_map.delta.cluster
- stride * thread_map.delta.group * (thread_map.iterations.group - 1)
- stride * thread_map.delta.row * (thread_map.iterations.row - 1);
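// The increment_* quantities are applied while sweeping a single tile: stepping to the next
// group or cluster first backs out the row/group offsets accumulated at the lower levels.
// The advance_* quantities below move the iterator between whole rows, groups, clusters and tiles.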
advance_row = stride * thread_map.shape.row;
advance_group =
stride *
(thread_map.shape.group - 1) * thread_map.shape.row * thread_map.count.row;
advance_cluster =
stride *
thread_map.count.group *
thread_map.shape.group *
thread_map.count.row *
thread_map.shape.row;
advance_tile =
stride *
thread_map.shape.group *
thread_map.shape.row *
thread_map.shape.cluster *
thread_map.shape.tile;
return Status::kSuccess;
}
CUTLASS_HOST_DEVICE
Status initialize(Index stride_, OutputTileThreadMapDesc thread_map) {
return initialize(LongIndex(stride_), thread_map);
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorParams() {
initialize(LongIndex(0), OutputTileThreadMapDesc());
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorParams(Index stride, OutputTileThreadMapDesc thread_map) {
initialize(stride, thread_map);
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorParams(LongIndex stride, OutputTileThreadMapDesc thread_map) {
initialize(stride, thread_map);
}
};
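//
// Illustrative construction (hypothetical names): for a row-major output whose rows are
// ldc elements of type Element apart, and an epilogue thread map type OutputTileThreadMap_:
//
//   PredicatedTileIteratorParams params(
//     LongIndex(ldc * sizeof(Element)),                    // stride in bytes between rows
//     make_OutputTileThreadMapDesc<OutputTileThreadMap_>());
//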
///////////////////////////////////////////////////////////////////////////////
//
// Parameters struct for PredicatedTileIteratorDirect2dConv
//
struct PredicatedTileIteratorDirect2dConvParams{
using Index = int32_t;
using LongIndex = int64_t;
//
// Data members
//
FastDivmod pq_divmod;
FastDivmod q_divmod;
LongIndex stride;
LongIndex stride_n;
LongIndex stride_p;
int N;
int P;
int Q;
//
// Methods
//
CUTLASS_HOST_DEVICE
Status initialize(LongIndex stride_,
cutlass::conv::Conv2dProblemSize const &problem_size,
MatrixCoord threadblock_output_shape) {
stride = stride_; // The stride per row of output tensor (bytes)
stride_n = problem_size.P * problem_size.Q;
stride_p = problem_size.Q ;
N = problem_size.N;
P = problem_size.P;
Q = problem_size.Q;
// Fastdivmod for output O, P, Q
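// pq_divmod and q_divmod let the epilogue recover (n, p, q) tile coordinates from a linear
// threadblock tile index without device-side integer division.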
if(threadblock_output_shape.row() != 0 && threadblock_output_shape.column() !=0 ){
// MSVC emits a "potential divide by 0" warning as error
// if the code just divides without a check and substitution.
CUTLASS_ASSERT(threadblock_output_shape.row() != 0);
const auto row_denom = threadblock_output_shape.row() != 0 ?
threadblock_output_shape.row() : cutlass::MatrixCoord::Index(1);
int tiles_p =
(problem_size.P + (threadblock_output_shape.row() - 1)) / row_denom;
CUTLASS_ASSERT(threadblock_output_shape.column() != 0);
const auto col_denom = threadblock_output_shape.column() != 0 ?
threadblock_output_shape.column() : cutlass::MatrixCoord::Index(1);
int tiles_q = (problem_size.Q + (threadblock_output_shape.column() - 1)) /
col_denom;
pq_divmod = FastDivmod(tiles_p * tiles_q);
q_divmod = FastDivmod(tiles_q);
}
return Status::kSuccess;
}
CUTLASS_HOST_DEVICE
Status initialize(
Index stride_,
cutlass::conv::Conv2dProblemSize const &problem_size = cutlass::conv::Conv2dProblemSize(),
MatrixCoord threadblock_output_shape = MatrixCoord()) {
return initialize(LongIndex(stride_), problem_size, threadblock_output_shape);
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorDirect2dConvParams() { initialize(LongIndex(0)); }
CUTLASS_HOST_DEVICE
PredicatedTileIteratorDirect2dConvParams(Index stride,
cutlass::conv::Conv2dProblemSize const &problem_size,
MatrixCoord threadblock_output_shape) {
initialize(stride, problem_size, threadblock_output_shape);
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorDirect2dConvParams(LongIndex stride,
cutlass::conv::Conv2dProblemSize const &problem_size,
MatrixCoord threadblock_output_shape) {
initialize(stride, problem_size, threadblock_output_shape);
}
};
///////////////////////////////////////////////////////////////////////////////
// InterleavedPredicatedTileIterator
///////////////////////////////////////////////////////////////////////////////
/// Predicated tile access iterator descriptor object containing template dependent state
struct InterleavedPredicatedTileIteratorDesc {
int element_size_bits;
int elements_per_access;
int threadmap_warp_size;
layout::PitchLinearCoord threadmap_iterations;
layout::PitchLinearCoord threadmap_delta;
//
// Methods
//
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorDesc() { }
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorDesc(
int element_size_bits_,
int elements_per_access_,
int threadmap_warp_size_,
layout::PitchLinearCoord threadmap_iterations_,
layout::PitchLinearCoord threadmap_delta_
):
element_size_bits(element_size_bits_),
elements_per_access(elements_per_access_),
threadmap_warp_size(threadmap_warp_size_),
threadmap_iterations(threadmap_iterations_),
threadmap_delta(threadmap_delta_) { }
};
//
// Parameters struct InterleavedPredicatedTileIterator
//
struct InterleavedPredicatedTileIteratorParams {
using Index = int32_t;
using LongIndex = int64_t;
//
// Data members
//
LongIndex stride; ///< stride in bytes between rows
LongIndex advance_row; ///< amount to add to move to the next 'row' position
LongIndex advance_column; ///< amount to add to move to the next 'column' position
//
// Methods
//
CUTLASS_HOST_DEVICE
Status initialize(LongIndex stride_, InterleavedPredicatedTileIteratorDesc desc) {
stride = stride_;
advance_row = desc.threadmap_delta.contiguous() * desc.element_size_bits / 8;
advance_column = stride_ - desc.threadmap_iterations.contiguous() *
desc.elements_per_access *
desc.element_size_bits *
desc.threadmap_warp_size / 8;
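// advance_row steps along the interleaved (contiguous) dimension in bytes; advance_column
// jumps to the next column block after rewinding the contiguous bytes already covered by the warp.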
return Status::kSuccess;
}
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorParams() {
initialize(LongIndex(0), InterleavedPredicatedTileIteratorDesc());
}
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorParams(Index stride, InterleavedPredicatedTileIteratorDesc desc) {
initialize(stride, desc);
}
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorParams(LongIndex stride, InterleavedPredicatedTileIteratorDesc desc) {
initialize(stride, desc);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper template to construct an OutputTileShapeDesc from a OutputTileThreadMap template.
template <typename Element, typename ThreadMap>
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorDesc make_InterleavedPredicatedTileIteratorDesc() {
return InterleavedPredicatedTileIteratorDesc(
sizeof_bits<Element>::value,
ThreadMap::kElementsPerAccess,
ThreadMap::kWarpSize,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper template to construct an MakePredicatedTileIteratorDesc from a template
// dependent state
template <typename Element, typename Layout,
typename ThreadMap>
struct MakePredicatedTileIteratorDesc;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for layout::RowMajor output data.
template <typename Element, typename ThreadMap>
struct MakePredicatedTileIteratorDesc <
Element, layout::RowMajor, ThreadMap> {
CUTLASS_HOST_DEVICE
OutputTileThreadMapDesc operator()() {
return make_OutputTileThreadMapDesc<ThreadMap>();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for layout::ColumnMajorInterleaved<InterleavedN> output data.
template <typename Element, typename ThreadMap, int InterleavedN>
struct MakePredicatedTileIteratorDesc <
Element, layout::ColumnMajorInterleaved<InterleavedN>, ThreadMap> {
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorDesc operator()() {
return make_InterleavedPredicatedTileIteratorDesc<Element, ThreadMap>();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/predicated_tile_iterator_params.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/predicated_tile_iterator_params.h",
"repo_id": "include",
"token_count": 4942
} | 25 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Warp-level tile iterators for mixed-precision epilogues in which 32b accumulators are staged through shared memory for narrower output types.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/epilogue/warp/tensor_op_policy.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
// This is an optimization available on CUDA 11.2 and beyond that eliminates branches in the epilogue.
#define CUTLASS_EPILOGUE_WARP_TILE_ITERATOR_TENSOR_OP_MIXED_OPTIMIZATION_ENABLED ((__CUDACC_VER_MAJOR__ * 10 + __CUDACC_VER_MINOR__) >= 112)
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory. This is optimized
/// for mixed-precision epilogues in which the accumulators are 32b in width, but the output
/// data type is smaller.
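/// Multiple shared-memory pointers are maintained so that narrow output vectors can be written
/// with a swizzled column mapping; the store routines below select a pointer per fragment.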
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename Element_, ///< data type of accumulator element
int ElementSizeBits, ///< Size of accumulator element in bits
int OutputSizeBits, ///< Size of output element in bits
int OutputElementCount, ///< number of elements in output vector
int ContiguousLanes, ///< Number of consecutive lanes writing to contiguous memory
bool EightBitsOutputOrLess = (OutputSizeBits <= 8)
>
class TileIteratorTensorOpMixed {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kOutputElementCount = OutputElementCount;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
Element,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
/// Number of pointers needed to write accumulators
static int const kPointerCount =
(OutputElementCount * sizeof_bits<Element>::value) / (const_min(128, OutputElementCount * sizeof_bits<Element>::value));
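// e.g. 16 x 32b accumulator elements require 512 / 128 = 4 pointers; 8 x 32b require 2.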
// Currently support max 4 ptr
static constexpr int kMaxPointerCount{4};
static_assert(kPointerCount <= kMaxPointerCount, "Can only accommodate four pointers at present.");
static_assert(sizeof(Element) == 4, "This can only be used with 32b accumulator data types (f32, s32).");
};
/// Padding quantity
using Padding = MatrixShape<
0,
Detail::kLanesInQuad * Policy::kElementsPerAccess>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointers_[Detail::kPointerCount] = {nullptr};
/// Stride in units of AccessType
int stride_{0};
/// Logical column in which warp tile is aligned
int warp_column_{0};
public:
/// Default constructor
TileIteratorTensorOpMixed() = default;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed(
TensorRef const &ref,
unsigned lane_id
):
stride_(ref.stride()[0] / Policy::kElementsPerAccess),
warp_column_(0) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
AccessType *ptr = reinterpret_cast<AccessType *>(ref.data()) + quad_id * stride_;
int column_idx = (lane_in_quad % 2) + (((lane_in_quad / 2) + i) % Detail::kPointerCount) * 2;
ptr += column_idx;
pointers_[i % Detail::kPointerCount] = ptr;
}
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_pointer_offset(Index pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] += pointer_offset / Policy::kElementsPerAccess;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_tile_offset(TensorCoord const &tile_offset) {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] += tile_offset.row() * Shape::kRow * stride_ +
tile_offset.column() * Shape::kColumn / Policy::kElementsPerAccess;
}
warp_column_ += tile_offset.column() * Shape::kColumn;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & operator+=(TensorCoord const &tile_offset) {
return add_tile_offset(tile_offset);
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
AccessType *ptr = pointers_[0];
#if CUTLASS_EPILOGUE_WARP_TILE_ITERATOR_TENSOR_OP_MIXED_OPTIMIZATION_ENABLED
// When the optimization is enabled, small tiles require separate logic.
bool kN32_optimization = (WarpShape::kN * Detail::kLanesInQuad * Policy::kElementsPerAccess * sizeof_bits<Element>::value) % 1024 == 0;
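// When the warp's column footprint is a whole multiple of 1024 bits, every column visited
// below maps to the same pointer index, so the selection is hoisted out of the store loop.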
if (kN32_optimization) {
int ptr_idx = ((warp_column_ * sizeof_bits<Element>::value) / 1024) % Detail::kPointerCount;
if (ptr_idx == 0) {
ptr = pointers_[0];
} else if (ptr_idx == 1) {
ptr = pointers_[1];
} else if (ptr_idx == 2) {
ptr = pointers_[2];
} else if (ptr_idx == 3) {
ptr = pointers_[3];
}
}
#endif
CUTLASS_PRAGMA_UNROLL
for (int64_t n = 0; n < Policy::OperatorCount::kColumn; ++n) {
#if CUTLASS_EPILOGUE_WARP_TILE_ITERATOR_TENSOR_OP_MIXED_OPTIMIZATION_ENABLED
//
// When the optimization is enabled, this expression suffices to obtain the SMEM pointer.
//
if (WarpShape::kN == 64) {
ptr = pointers_[n / 4];
}
else if (!kN32_optimization)
#endif
{
// This is the reference implementation
int column_idx = warp_column_ + n * Detail::kLanesInQuad * Policy::kElementsPerAccess;
int ptr_idx = ((column_idx * sizeof_bits<Element>::value) / 1024) % Detail::kPointerCount;
if (ptr_idx == 0) {
ptr = pointers_[0 % Detail::kPointerCount];
}
else if (ptr_idx == 1) {
ptr = pointers_[1 % Detail::kPointerCount];
}
else if (ptr_idx == 2) {
ptr = pointers_[2 % Detail::kPointerCount];
}
else if (ptr_idx == 3) {
ptr = pointers_[3 % Detail::kPointerCount];
}
}
int offset = n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess;
ptr[offset] = frag_ptr[n];
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int64_t n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int column_idx = warp_column_ + n * Detail::kLanesInQuad * Policy::kElementsPerAccess;
int ptr_idx = ((column_idx * sizeof_bits<Element>::value) / 1024) % Detail::kPointerCount;
AccessType const *smem_ptr = pointers_[ptr_idx];
frag_ptr[n] = smem_ptr[n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess];
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for int32_t x 16 => int8_t/int4b_t x 16
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
int OutputSizeBits ///< Size of output element in bits
>
class TileIteratorTensorOpMixed<WarpShape_, OperatorShape_, int32_t, 32, OutputSizeBits, 16, 8, true> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = int32_t;
using Layout = layout::RowMajor;
static int const kOutputElementCount = 16;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
Element,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
/// Number of pointers needed to write accumulators
static int const kPointerCount = 2;
/// Offsets added
static int const kOffsetCount = 4;
static_assert(sizeof(Element) == 4, "This can only be used with 32b accumulator data types (f32, s32).");
};
/// Padding quantity
using Padding = MatrixShape<0, Detail::kLanesInQuad * 2>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, 2>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointers_[Detail::kPointerCount] = {nullptr};
/// Stride in units of AccessType
int stride_{0};
/// Uniform offset in bytes added to warp tile iterator
int uniform_offset_[Detail::kOffsetCount] = {0};
public:
/// Default constructor
TileIteratorTensorOpMixed() = default;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed(
TensorRef const &ref,
unsigned lane_id
):
stride_(ref.stride()[0] / AccessType::kElements) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
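// Each pointer below is offset by an XOR-swizzled column index within the quad; the store
// routine relies on this interleaving when it selects a pointer and uniform offset per fragment.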
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
AccessType *ptr = reinterpret_cast<AccessType *>(ref.data()) + quad_id * stride_;
int column_idx = lane_in_quad ^ (i * 2);
ptr += column_idx;
if (i == 0) {
pointers_[0] = ptr;
}
else if (i == 1) {
pointers_[1] = ptr;
}
}
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kOffsetCount; ++i) {
uniform_offset_[i] = (i ^ 0) * 4 * sizeof(AccessType);
}
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_pointer_offset(Index pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] += pointer_offset / AccessType::kElements;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_tile_offset(TensorCoord const &tile_offset) {
int ptr_offset = tile_offset.row() * Shape::kRow * stride_ +
tile_offset.column() * Shape::kColumn / AccessType::kElements;
pointers_[0] += ptr_offset;
pointers_[1] += ptr_offset;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kOffsetCount; ++i) {
uniform_offset_[i] = (i ^ tile_offset.column()) * 4 * sizeof(AccessType);
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & operator+=(TensorCoord const &tile_offset) {
return add_tile_offset(tile_offset);
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int ptr_idx = (n / 4);
int offset_idx = (n % 4);
AccessType *ptr;
if (ptr_idx == 0) {
ptr = pointers_[0];
}
else if (ptr_idx == 1) {
ptr = pointers_[1];
}
int offset = (n / 4) * 16 + pointer_offset / AccessType::kElements;
#if 0
//
// Using inline PTX to avoid generic memory
//
AccessType *smem_ptr = pointers_[ptr_idx];
smem_ptr[offset] = frag_ptr[n];
#else
uint32_t smem_addr = arch::cutlass_get_smem_pointer(ptr);
uint32_t const *data = reinterpret_cast<uint32_t const *>(frag_ptr + n);
uint32_t offset_in_bytes = offset * sizeof(AccessType) + uniform_offset_[offset_idx];
asm volatile(
"{ .reg .u32 smem_ptr; add.u32 smem_ptr, %0, %1; st.shared.v2.u32 [smem_ptr], {%2, %3}; }\n"
: : "r"(smem_addr), "r"(offset_in_bytes), "r"(data[0]), "r"(data[1])
);
#endif
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for int32_t x 8 => int8_t/int4b_t x 8
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
int OutputSizeBits ///< Size of output element in bits
>
class TileIteratorTensorOpMixed<WarpShape_, OperatorShape_, int32_t, 32, OutputSizeBits, 8, 8, true> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = int32_t;
using Layout = layout::RowMajor;
static int const kOutputElementCount = 8;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
Element,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
/// Number of pointers needed to write accumulators
static int const kPointerCount = 2;
static_assert(sizeof(Element) == 4, "This can only be used with 32b accumulator data types (f32, s32).");
};
/// Padding quantity
using Padding = MatrixShape<0, Detail::kLanesInQuad * 2>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, 2>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointers_[Detail::kPointerCount] = {nullptr};
/// Stride in units of AccessType
int stride_{0};
public:
/// Default constructor
TileIteratorTensorOpMixed() = default;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed(
TensorRef const &ref,
unsigned lane_id
):
stride_(ref.stride()[0] / AccessType::kElements) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
AccessType *ptr = reinterpret_cast<AccessType *>(ref.data()) + quad_id * stride_;
int column_idx = lane_in_quad ^ (i * 2);
ptr += column_idx;
if (i == 0) {
pointers_[0] = ptr;
}
else if (i == 1) {
pointers_[1] = ptr;
}
}
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_pointer_offset(Index pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] += pointer_offset / AccessType::kElements;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_tile_offset(TensorCoord const &tile_offset) {
int ptr_offset = tile_offset.row() * Shape::kRow * stride_ +
tile_offset.column() * Shape::kColumn / AccessType::kElements;
pointers_[0] += ptr_offset;
pointers_[1] += ptr_offset;
if (tile_offset.column() % 2) {
auto tmp = pointers_[0];
pointers_[0] = pointers_[1];
pointers_[1] = tmp;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & operator+=(TensorCoord const &tile_offset) {
return add_tile_offset(tile_offset);
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int ptr_idx = (n / 4);
AccessType *ptr;
if (ptr_idx == 0) {
ptr = pointers_[0];
}
else if (ptr_idx == 1) {
ptr = pointers_[1];
}
int offset = (n / 4) * 16 + pointer_offset / AccessType::kElements + (n % 4) * 4;
#if 0
//
// Using inline PTX to avoid generic memory
//
AccessType *smem_ptr = pointers_[ptr_idx];
smem_ptr[offset] = frag_ptr[n];
#else
uint32_t smem_addr = arch::cutlass_get_smem_pointer(ptr);
uint32_t const *data = reinterpret_cast<uint32_t const *>(frag_ptr + n);
uint32_t offset_in_bytes = offset * sizeof(AccessType);
asm volatile(
"{ .reg .u32 smem_ptr; add.u32 smem_ptr, %0, %1; st.shared.v2.u32 [smem_ptr], {%2, %3}; }\n"
: : "r"(smem_addr), "r"(offset_in_bytes), "r"(data[0]), "r"(data[1])
);
#endif
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for float x 16 => float_e4m3_t/float_e5m2_t x 16
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_ ///< matrix multiply operation shape (concept: gemm::GemmShape)
>
class TileIteratorTensorOpMixed<WarpShape_, OperatorShape_, float, 32, 8, 16, 8> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = float;
using Layout = layout::RowMajor;
static int const kOutputElementCount = 16;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
Element,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
/// Number of pointers needed to write accumulators
static int const kPointerCount = 2;
/// Offsets added
static int const kOffsetCount = 4;
static_assert(sizeof(Element) == 4, "This can only be used with 32b accumulator data types (f32, s32).");
};
/// Padding quantity
using Padding = MatrixShape<0, Detail::kLanesInQuad * 2>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, 2>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointers_[Detail::kPointerCount] = {nullptr};
/// Stride in units of AccessType
int stride_{0};
/// Uniform offset in bytes added to warp tile iterator
int uniform_offset_[Detail::kOffsetCount] = {0};
public:
/// Default constructor
TileIteratorTensorOpMixed() = default;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed(
TensorRef const &ref,
unsigned lane_id
):
stride_(ref.stride()[0] / AccessType::kElements) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
AccessType *ptr = reinterpret_cast<AccessType *>(ref.data()) + quad_id * stride_;
int column_idx = lane_in_quad ^ (i * 2);
ptr += column_idx;
if (i == 0) {
pointers_[0] = ptr;
}
else if (i == 1) {
pointers_[1] = ptr;
}
}
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kOffsetCount; ++i) {
uniform_offset_[i] = (i ^ 0) * 4 * sizeof(AccessType);
}
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_pointer_offset(Index pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] += pointer_offset / AccessType::kElements;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_tile_offset(TensorCoord const &tile_offset) {
int ptr_offset = tile_offset.row() * Shape::kRow * stride_ +
tile_offset.column() * Shape::kColumn / AccessType::kElements;
pointers_[0] += ptr_offset;
pointers_[1] += ptr_offset;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kOffsetCount; ++i) {
uniform_offset_[i] = (i ^ tile_offset.column()) * 4 * sizeof(AccessType);
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & operator+=(TensorCoord const &tile_offset) {
return add_tile_offset(tile_offset);
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int ptr_idx = (n / 4);
int offset_idx = (n % 4);
AccessType *ptr;
if (ptr_idx == 0) {
ptr = pointers_[0];
}
else if (ptr_idx == 1) {
ptr = pointers_[1];
}
int offset = (n / 4) * 16 + pointer_offset / AccessType::kElements;
#if 0
//
// Using inline PTX to avoid generic memory
//
AccessType *smem_ptr = pointers_[ptr_idx];
smem_ptr[offset] = frag_ptr[n];
#else
uint32_t smem_addr = arch::cutlass_get_smem_pointer(ptr);
uint32_t const *data = reinterpret_cast<uint32_t const *>(frag_ptr + n);
uint32_t offset_in_bytes = offset * sizeof(AccessType) + uniform_offset_[offset_idx];
asm volatile(
"{ .reg .u32 smem_ptr; add.u32 smem_ptr, %0, %1; st.shared.v2.u32 [smem_ptr], {%2, %3}; }\n"
: : "r"(smem_addr), "r"(offset_in_bytes), "r"(data[0]), "r"(data[1])
);
#endif
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for float x 8 => float_e4m3_t/float_e5m2_t x 8
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_ ///< matrix multiply operation shape (concept: gemm::GemmShape)
>
class TileIteratorTensorOpMixed<WarpShape_, OperatorShape_, float, 32, 8, 8, 8> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = float;
using Layout = layout::RowMajor;
static int const kOutputElementCount = 8;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
Element,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
/// Number of pointers needed to write accumulators
static int const kPointerCount = 2;
static_assert(sizeof(Element) == 4, "This can only be used with 32b accumulator data types (f32, s32).");
};
/// Padding quantity
using Padding = MatrixShape<0, Detail::kLanesInQuad * 2>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, 2>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointers_[Detail::kPointerCount] = {nullptr};
/// Stride in units of AccessType
int stride_{0};
public:
/// Default constructor
TileIteratorTensorOpMixed() = default;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed(
TensorRef const &ref,
unsigned lane_id
):
stride_(ref.stride()[0] / AccessType::kElements) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
AccessType *ptr = reinterpret_cast<AccessType *>(ref.data()) + quad_id * stride_;
int column_idx = lane_in_quad ^ (i * 2);
ptr += column_idx;
if (i == 0) {
pointers_[0] = ptr;
}
else if (i == 1) {
pointers_[1] = ptr;
}
}
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_pointer_offset(Index pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] += pointer_offset / AccessType::kElements;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_tile_offset(TensorCoord const &tile_offset) {
int ptr_offset = tile_offset.row() * Shape::kRow * stride_ +
tile_offset.column() * Shape::kColumn / AccessType::kElements;
pointers_[0] += ptr_offset;
pointers_[1] += ptr_offset;
if (tile_offset.column() % 2) {
auto tmp = pointers_[0];
pointers_[0] = pointers_[1];
pointers_[1] = tmp;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & operator+=(TensorCoord const &tile_offset) {
return add_tile_offset(tile_offset);
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int ptr_idx = (n / 4);
AccessType *ptr;
if (ptr_idx == 0) {
ptr = pointers_[0];
}
else if (ptr_idx == 1) {
ptr = pointers_[1];
}
int offset = (n / 4) * 16 + pointer_offset / AccessType::kElements + (n % 4) * 4;
#if 0
//
// Using inline PTX to avoid generic memory
//
AccessType *smem_ptr = pointers_[ptr_idx];
smem_ptr[offset] = frag_ptr[n];
#else
uint32_t smem_addr = arch::cutlass_get_smem_pointer(ptr);
uint32_t const *data = reinterpret_cast<uint32_t const *>(frag_ptr + n);
uint32_t offset_in_bytes = offset * sizeof(AccessType);
asm volatile(
"{ .reg .u32 smem_ptr; add.u32 smem_ptr, %0, %1; st.shared.v2.u32 [smem_ptr], {%2, %3}; }\n"
: : "r"(smem_addr), "r"(offset_in_bytes), "r"(data[0]), "r"(data[1])
);
#endif
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
#undef CUTLASS_EPILOGUE_WARP_TILE_ITERATOR_TENSOR_OP_MIXED_OPTIMIZATION_ENABLED
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h/0 | {
"file_path": "include/cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h",
"repo_id": "include",
"token_count": 12302
} | 26 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cute/algorithm/functional.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/algorithm/gemm.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/tensor_predicate.hpp"
#include "cutlass/gemm/collective/collective_mma_decl.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::collective {
using namespace cute;
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
class TileShape_,
class ElementA_,
class StrideA_,
class ElementB_,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_>
struct CollectiveMma<
MainloopSm70TwoStageUnpredicated,
TileShape_,
ElementA_,
StrideA_,
ElementB_,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_>
{
//
// Type Aliases
//
using DispatchPolicy = MainloopSm70TwoStageUnpredicated;
using TileShape = TileShape_;
using ElementA = ElementA_;
using StrideA = StrideA_;
using ElementB = ElementB_;
using StrideB = StrideB_;
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
using ArchTag = typename DispatchPolicy::ArchTag;
static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
using SmemLayoutA = decltype(tile_to_shape(
SmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}))));
using SmemLayoutB = decltype(tile_to_shape(
SmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}))));
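// The shared-memory layouts tile the rank-2 (M/N, K) atoms to the full CTA tile; the two-stage
// mainloop keeps a single smem stage per operand and double-buffers through register fragments.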
struct SharedStorage
{
cute::array_aligned<ElementA, cute::cosize_v<SmemLayoutA>> smem_a;
cute::array_aligned<ElementB, cute::cosize_v<SmemLayoutB>> smem_b;
};
// Host side kernel arguments
struct Arguments {
ElementA const* ptr_A;
StrideA dA;
ElementB const* ptr_B;
StrideB dB;
};
// Device side kernel params
using Params = Arguments;
//
// Methods
//
CollectiveMma() = default;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& _, Arguments const& args, void* workspace) {
(void) workspace;
return args;
}
/// Perform a threadblock-scoped matrix multiply-accumulate
template <
class FrgTensorD,
class TensorA,
class TensorB,
class FrgTensorC,
class KTileIterator,
class ResidueMNK
>
CUTLASS_DEVICE void
operator() (
FrgTensorD &accum,
TensorA gA,
TensorB gB,
FrgTensorC const &src_accum,
KTileIterator k_tile_iter, int k_tile_count,
ResidueMNK residue_mnk,
int thread_idx,
char *smem_buf)
{
using namespace cute;
(void)residue_mnk;
static_assert(is_rmem<FrgTensorD>::value, "D tensor must be rmem resident.");
static_assert(is_gmem<TensorA>::value, "A tensor must be gmem resident.");
static_assert(is_gmem<TensorB>::value, "B tensor must be gmem resident.");
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
static_assert(cute::rank(SmemLayoutA{}) == 2,
"MainloopTwoStage must not have a smem shape with a pipeline mode.");
static_assert(cute::rank(SmemLayoutB{}) == 2,
"MainloopTwoStage must not have a smem shape with a pipeline mode.");
// Construct shared memory tiles
SharedStorage& storage = *reinterpret_cast<SharedStorage*>(smem_buf);
Tensor sA = make_tensor(make_smem_ptr(storage.smem_a.data()), SmemLayoutA{}); // (BLK_M,BLK_K)
Tensor sB = make_tensor(make_smem_ptr(storage.smem_b.data()), SmemLayoutB{}); // (BLK_N,BLK_K)
// Partition the copying of A and B tiles across the threads
GmemTiledCopyA gmem_tiled_copy_a;
GmemTiledCopyB gmem_tiled_copy_b;
auto copy_a_thr = gmem_tiled_copy_a.get_slice(thread_idx);
auto copy_b_thr = gmem_tiled_copy_b.get_slice(thread_idx);
Tensor tAgA = copy_a_thr.partition_S(gA); // (ACPY,ACPY_M,ACPY_K,k)
Tensor tAsA = copy_a_thr.partition_D(sA); // (ACPY,ACPY_M,ACPY_K)
Tensor tBgB = copy_b_thr.partition_S(gB); // (BCPY,BCPY_N,BCPY_K,k)
Tensor tBsB = copy_b_thr.partition_D(sB); // (BCPY,BCPY_N,BCPY_K)
// Allocate the register tiles for double buffering -- same shape as partitioned data
Tensor tArA = make_fragment_like(tAsA); // (ACPY,ACPY_M,ACPY_K)
Tensor tBrB = make_fragment_like(tBsB); // (BCPY,BCPY_N,BCPY_K)
// Tile MMA compute thread partitions and allocate accumulators
TiledMma tiled_mma;
auto thr_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCrA = thr_mma.partition_fragment_A(sA); // (MMA,MMA_M,MMA_K)
Tensor tCrB = thr_mma.partition_fragment_B(sB); // (MMA,MMA_N,MMA_K)
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(src_accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(src_accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K
//
// Copy Atom retiling
//
auto thr_copy_A = make_tiled_copy_A(SmemCopyAtomA{}, tiled_mma).get_thread_slice(thread_idx);
Tensor tCsA = thr_copy_A.partition_S(sA);
Tensor tCrA_copy_view = thr_copy_A.retile_D(tCrA);
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // M
auto thr_copy_B = make_tiled_copy_B(SmemCopyAtomB{}, tiled_mma).get_thread_slice(thread_idx);
Tensor tCsB = thr_copy_B.partition_S(sB);
Tensor tCrB_copy_view = thr_copy_B.retile_D(tCrB);
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // N
//
// Prologue
//
// Copy gmem to rmem for the first k_tile
copy(gmem_tiled_copy_a, tAgA(_,_,_,*k_tile_iter), tArA);
copy(gmem_tiled_copy_b, tBgB(_,_,_,*k_tile_iter), tBrB);
if (--k_tile_count > 0) ++k_tile_iter;
// Copy rmem to smem
copy(tArA, tAsA);
copy(tBrB, tBsB);
// Make the smem stores above visible to the whole threadblock before reading them back
__syncthreads();
// Load A, B smem->rmem for k=0
copy(tCsA(_,_,0), tCrA_copy_view(_,_,0));
copy(tCsB(_,_,0), tCrB_copy_view(_,_,0));
//
// Mainloop
//
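// Two-stage pipeline, summarized (descriptive only): the rmem fragments tArA/tBrB hold the
// next k-tile fetched from gmem, while sA/sB hold the k-tile currently being consumed.
// Within each k-tile, the smem->rmem load for k_block+1 is issued before the MMA for
// k_block, the gmem fetch for the next k-tile is issued at k_block == 0, and the rmem->smem
// store (with its barriers) happens at the last k_block, so data movement overlaps with math.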
// Size of the k-tile's outer product mode (k)
auto K_BLOCK_MAX = size<2>(tCrA);
CUTLASS_PRAGMA_NO_UNROLL
while (k_tile_count > -1)
{
// Pipeline the outer products with a static for loop
for_each(make_int_sequence<K_BLOCK_MAX>{}, [&] (auto k_block)
{
if (k_block == K_BLOCK_MAX - 1)
{
__syncthreads();
// Copy rmem to smem
copy(tArA, tAsA);
copy(tBrB, tBsB);
__syncthreads();
}
// Load A, B smem->rmem for k+1
int k_block_next = (k_block + Int<1>{}) % K_BLOCK_MAX; // static
copy(tCsA(_,_,k_block_next), tCrA_copy_view(_,_,k_block_next));
copy(tCsB(_,_,k_block_next), tCrB_copy_view(_,_,k_block_next));
if (k_block == 0)
{
// Copy gmem to rmem
copy(gmem_tiled_copy_a, tAgA(_,_,_,*k_tile_iter), tArA);
copy(gmem_tiled_copy_b, tBgB(_,_,_,*k_tile_iter), tBrB);
if (--k_tile_count > 0) ++k_tile_iter;
}
// transform before compute
cute::transform(tCrA(_,_,k_block), TransformA{});
cute::transform(tCrB(_,_,k_block), TransformB{});
// Thread-level register gemm for k
// disambiguate gemm (shared with the namespace name)
cute::gemm(tiled_mma, accum, tCrA(_,_,k_block), tCrB(_,_,k_block), src_accum);
});
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
class TileShape_,
class ElementA_,
class StrideA_,
class ElementB_,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_>
struct CollectiveMma<
MainloopSm70TwoStage,
TileShape_,
ElementA_,
StrideA_,
ElementB_,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_>
{
//
// Type Aliases
//
using DispatchPolicy = MainloopSm70TwoStage;
using TileShape = TileShape_;
using ElementA = ElementA_;
using StrideA = StrideA_;
using ElementB = ElementB_;
using StrideB = StrideB_;
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
using ArchTag = typename DispatchPolicy::ArchTag;
static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
using SmemLayoutA = decltype(tile_to_shape(
SmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}))));
using SmemLayoutB = decltype(tile_to_shape(
SmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}))));
struct SharedStorage
{
cute::array_aligned<ElementA, cute::cosize_v<SmemLayoutA>> smem_a;
cute::array_aligned<ElementB, cute::cosize_v<SmemLayoutB>> smem_b;
};
// Host side kernel arguments
struct Arguments {
ElementA const* ptr_A;
StrideA dA;
ElementB const* ptr_B;
StrideB dB;
};
// Device side kernel params
using Params = Arguments;
//
// Methods
//
CollectiveMma() = default;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& _, Arguments const& args, void* workspace) {
(void) workspace;
return args;
}
/// Perform a threadblock-scoped matrix multiply-accumulate
template <
class FrgTensorD,
class TensorA,
class TensorB,
class FrgTensorC,
class KTileIterator,
class ResidueMNK
>
CUTLASS_DEVICE void
operator() (
FrgTensorD &accum,
TensorA gA,
TensorB gB,
FrgTensorC const &src_accum,
KTileIterator k_tile_iter, int k_tile_count,
ResidueMNK residue_mnk,
int thread_idx,
char *smem_buf)
{
using namespace cute;
static_assert(is_rmem<FrgTensorD>::value, "D tensor must be rmem resident.");
static_assert(is_gmem<TensorA>::value, "A tensor must be gmem resident.");
static_assert(is_gmem<TensorB>::value, "B tensor must be gmem resident.");
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
static_assert(cute::rank(SmemLayoutA{}) == 2,
"MainloopTwoStage must not have a smem shape with a pipeline mode.");
static_assert(cute::rank(SmemLayoutB{}) == 2,
"MainloopTwoStage must not have a smem shape with a pipeline mode.");
// Construct shared memory tiles
SharedStorage& storage = *reinterpret_cast<SharedStorage*>(smem_buf);
Tensor sA = make_tensor(make_smem_ptr(storage.smem_a.data()), SmemLayoutA{}); // (BLK_M,BLK_K)
Tensor sB = make_tensor(make_smem_ptr(storage.smem_b.data()), SmemLayoutB{}); // (BLK_N,BLK_K)
// Shift tensor so residue_k is at origin (Can't read any k_coord < residue_k)
// This aligns the tensor with BLK_K for all but the 0th k_tile
gA.data() = &gA(0, get<2>(residue_mnk), 0);
gB.data() = &gB(0, get<2>(residue_mnk), 0);
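// Illustration (hypothetical numbers, assuming the caller passes residue_k = K - k_tiles * BLK_K,
// which is <= 0, as the predicated copies below imply): with BLK_K = 32 and K = 40 there are
// 2 k-tiles and residue_k = -24, so gA/gB are shifted back by 24 along k. The 0th k-tile then
// covers original k coords [-24, 8), of which only the 8 valid ones (shifted coord >= 24) are
// loaded, and every later k-tile reads a fully aligned BLK_K-wide chunk.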
// Partition the copying of A and B tiles across the threads
GmemTiledCopyA gmem_tiled_copy_a;
GmemTiledCopyB gmem_tiled_copy_b;
auto gmem_thr_copy_a = gmem_tiled_copy_a.get_slice(thread_idx);
auto gmem_thr_copy_b = gmem_tiled_copy_b.get_slice(thread_idx);
Tensor tAgA = gmem_thr_copy_a.partition_S(gA); // (ACPY,ACPY_M,ACPY_K,k)
Tensor tAsA = gmem_thr_copy_a.partition_D(sA); // (ACPY,ACPY_M,ACPY_K)
Tensor tBgB = gmem_thr_copy_b.partition_S(gB); // (BCPY,BCPY_N,BCPY_K,k)
Tensor tBsB = gmem_thr_copy_b.partition_D(sB); // (BCPY,BCPY_N,BCPY_K)
// Allocate the register tiles for double buffering -- same shape as partitioned data
Tensor tArA = make_fragment_like(tAsA); // (ACPY,ACPY_M,ACPY_K)
Tensor tBrB = make_fragment_like(tBsB); // (BCPY,BCPY_N,BCPY_K)
//
// PREDICATES
//
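// Note: the predicate tensors below are shaped (CPY_M, CPY_K) resp. (CPY_N, CPY_K) but use a
// zero stride in the k mode, so a single m (or n) bound computed once is broadcast across all
// k iterations of a copy; the k bound itself is handled separately by the residue-shifted
// coordinate check in the prefetch loop and by the predicated copies in the mainloop.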
// Allocate predicate tensors for m and n
Tensor tApA = make_tensor<bool>(make_shape(size<1>(tAsA), size<2>(tAsA)), Stride<_1,_0>{});
Tensor tBpB = make_tensor<bool>(make_shape(size<1>(tBsB), size<2>(tBsB)), Stride<_1,_0>{});
// Construct identity layout for sA and sB
Tensor cA = make_identity_tensor(make_shape(size<0>(sA), size<1>(sA))); // (BLK_M,BLK_K) -> (blk_m,blk_k)
Tensor cB = make_identity_tensor(make_shape(size<0>(sB), size<1>(sB))); // (BLK_N,BLK_K) -> (blk_n,blk_k)
// Repeat the partitioning with identity layouts
Tensor tAcA = gmem_thr_copy_a.partition_S(cA); // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k)
Tensor tBcB = gmem_thr_copy_b.partition_S(cB); // (BCPY,BCPY_N,BCPY_K) -> (blk_n,blk_k)
// Set predicates for m bounds
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < size<0>(tApA); ++m) {
tApA(m,0) = get<0>(tAcA(0,m,0)) < get<0>(residue_mnk); // blk_m coord < residue_m
}
// Set predicates for n bounds
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < size<0>(tBpB); ++n) {
tBpB(n,0) = get<0>(tBcB(0,n,0)) < get<1>(residue_mnk); // blk_n coord < residue_n
}
//
// PREFETCH
//
// Clear the rmem tiles to account for predicated off loads
clear(tArA);
clear(tBrB);
// Start async loads for 0th k-tile, where we take care of the k residue
{
Tensor tAgAk = tAgA(_,_,_,*k_tile_iter);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < size<2>(tArA); ++k) {
if (get<1>(tAcA(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gA shifted)
copy_if(gmem_tiled_copy_a, tApA(_,k), tAgAk(_,_,k), tArA(_,_,k));
}
}
Tensor tBgBk = tBgB(_,_,_,*k_tile_iter);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < size<2>(tBrB); ++k) {
if (get<1>(tBcB(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gB shifted)
copy_if(gmem_tiled_copy_b, tBpB(_,k), tBgBk(_,_,k), tBrB(_,_,k));
}
}
++k_tile_iter;
--k_tile_count;
}
// Tile MMA compute thread partitions and allocate accumulators
TiledMma tiled_mma;
auto thr_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCrA = thr_mma.make_fragment_A(thr_mma.partition_A(sA)); // (MMA,MMA_M,MMA_K)
Tensor tCrB = thr_mma.make_fragment_B(thr_mma.partition_B(sB)); // (MMA,MMA_N,MMA_K)
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(src_accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(src_accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K
//
// Copy Atom retiling
//
auto thr_copy_A = make_tiled_copy_A(SmemCopyAtomA{}, tiled_mma).get_thread_slice(thread_idx);
Tensor tCsA = thr_copy_A.partition_S(sA);
Tensor tCrA_copy_view = thr_copy_A.retile_D(tCrA);
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // M
auto thr_copy_B = make_tiled_copy_B(SmemCopyAtomB{}, tiled_mma).get_thread_slice(thread_idx);
Tensor tCsB = thr_copy_B.partition_S(sB);
Tensor tCrB_copy_view = thr_copy_B.retile_D(tCrB);
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // N
//
// Prologue
//
// Copy rmem to smem
copy(tArA, tAsA);
copy(tBrB, tBsB);
// Make the smem stores above visible to the whole threadblock before reading them back
__syncthreads();
// Load A, B smem->rmem for k=0
copy(tCsA(_,_,0), tCrA_copy_view(_,_,0));
copy(tCsB(_,_,0), tCrB_copy_view(_,_,0));
//
// Mainloop
//
// Size of the k-tile's outer product mode (k)
auto K_BLOCK_MAX = size<2>(tCrA);
CUTLASS_PRAGMA_NO_UNROLL
while (k_tile_count > -1)
{
// Pipeline the outer products with a static for loop
for_each(make_int_sequence<K_BLOCK_MAX>{}, [&] (auto k_block)
{
if (k_block == K_BLOCK_MAX - 1)
{
__syncthreads();
// Copy rmem to smem
copy(tArA, tAsA);
copy(tBrB, tBsB);
__syncthreads();
}
// Load A, B smem->rmem for k+1
int k_block_next = (k_block + Int<1>{}) % K_BLOCK_MAX; // static
copy(tCsA(_,_,k_block_next), tCrA_copy_view(_,_,k_block_next));
copy(tCsB(_,_,k_block_next), tCrB_copy_view(_,_,k_block_next));
if (k_block == 0)
{
if (k_tile_count <= 0) {
clear(tApA);
clear(tBpB);
}
copy_if(gmem_tiled_copy_a, tApA, tAgA(_,_,_,*k_tile_iter), tArA);
copy_if(gmem_tiled_copy_b, tBpB, tBgB(_,_,_,*k_tile_iter), tBrB);
++k_tile_iter;
--k_tile_count;
}
// transform before compute
cute::transform(tCrA(_,_,k_block), TransformA{});
cute::transform(tCrB(_,_,k_block), TransformB{});
// Thread-level register gemm for k
// disambiguate gemm (shared with the namespace name)
cute::gemm(tiled_mma, accum, tCrA(_,_,k_block), tCrB(_,_,k_block), src_accum);
});
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::collective
/////////////////////////////////////////////////////////////////////////////////////////////////
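//
// Usage sketch (illustrative only -- every "My*" type below is a hypothetical placeholder, not
// something defined in this header). A two-stage SM70 mainloop is obtained by instantiating the
// CollectiveMma specialization above with the MainloopSm70TwoStage dispatch policy and concrete
// CuTe atoms, roughly:
//
//   using Collective = cutlass::gemm::collective::CollectiveMma<
//       cutlass::gemm::MainloopSm70TwoStage,
//       MyTileShape,                                   // e.g. Shape<_128,_128,_8>
//       MyElementA, MyStrideA,
//       MyElementB, MyStrideB,
//       MyTiledMma,                                    // a cute::TiledMMA over an SM70 MMA atom
//       MyGmemTiledCopyA, MySmemLayoutAtomA, MySmemCopyAtomA, MyTransformA,
//       MyGmemTiledCopyB, MySmemLayoutAtomB, MySmemCopyAtomB, MyTransformB>;
//
// The kernel layer then places Collective::SharedStorage in shared memory and calls the
// collective's operator() once per output tile with the gmem tiles, k-tile iterator, and
// thread/smem handles shown above.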
| include/cutlass/gemm/collective/sm70_mma_twostage.hpp/0 | {
"file_path": "include/cutlass/gemm/collective/sm70_mma_twostage.hpp",
"repo_id": "include",
"token_count": 10205
} | 27 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Problem visitor for grouped GEMMs
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/trace.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
#include "cutlass/gemm/kernel/gemm_grouped_problem_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
GroupScheduleMode GroupScheduleMode_, ///! Type of scheduling to perform
bool Transposed = false
>
struct GemmGrouped {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_;
static bool const kTransposed = Transposed;
// Optional transpose
using MapArguments = kernel::detail::MapArguments<
typename Mma::IteratorA::Element,
typename Mma::IteratorA::Layout,
Mma::kTransformA,
Mma::IteratorA::AccessType::kElements,
typename Mma::IteratorB::Element,
typename Mma::IteratorB::Layout,
Mma::kTransformB,
Mma::IteratorB::AccessType::kElements,
typename Mma::LayoutC,
kTransposed
>;
// Public-facing type definitions related to operand element type, layout, and complex conjugate
// operation. Must interact with the 'kTransposed' notion.
using ElementA = typename MapArguments::ElementA;
using LayoutA = typename MapArguments::LayoutA;
using ElementB = typename MapArguments::ElementB;
using LayoutB = typename MapArguments::LayoutB;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename MapArguments::LayoutC;
static ComplexTransform const kTransformA = MapArguments::kTransformA;
static ComplexTransform const kTransformB = MapArguments::kTransformB;
// Type definitions about the mainloop.
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = MapArguments::kAlignmentA;
static int const kAlignmentB = MapArguments::kAlignmentB;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
using ProblemVisitor = GemmGroupedProblemVisitor<
ThreadblockShape,
kGroupScheduleMode,
kThreadCount,
kThreadCount,
kTransposed>;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord *problem_sizes{nullptr};
int problem_count{0};
int threadblock_count{0};
typename EpilogueOutputOp::Params output_op{};
ElementA ** ptr_A{nullptr};
ElementB ** ptr_B{nullptr};
ElementC ** ptr_C{nullptr};
ElementC ** ptr_D{nullptr};
typename LayoutA::Stride::LongIndex *lda{nullptr};
typename LayoutB::Stride::LongIndex *ldb{nullptr};
typename LayoutC::Stride::LongIndex *ldc{nullptr};
typename LayoutC::Stride::LongIndex *ldd{nullptr};
// Only used by device-level operator
GemmCoord *host_problem_sizes{nullptr};
//
// Methods
//
/// Default ctor
Arguments() = default;
/// Ctor
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord *problem_sizes,
int problem_count,
int threadblock_count,
typename EpilogueOutputOp::Params output_op,
ElementA ** ptr_A,
ElementB ** ptr_B,
ElementC ** ptr_C,
ElementC ** ptr_D,
typename LayoutA::Stride::LongIndex *lda,
typename LayoutB::Stride::LongIndex *ldb,
typename LayoutC::Stride::LongIndex *ldc,
typename LayoutC::Stride::LongIndex *ldd,
GemmCoord *host_problem_sizes=nullptr
):
problem_sizes(problem_sizes),
problem_count(problem_count),
threadblock_count(threadblock_count),
output_op(output_op),
ptr_A(ptr_A),
ptr_B(ptr_B),
ptr_C(ptr_C),
ptr_D(ptr_D),
lda(lda),
ldb(ldb),
ldc(ldc),
ldd(ldd),
host_problem_sizes(host_problem_sizes)
{
}
};
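// Host-side sketch of filling Arguments (illustrative only; the d_* / h_* / num_groups names
// are hypothetical, and {alpha, beta} assumes a LinearCombination-style epilogue). Each array
// holds one entry per problem and, except for host_problem_sizes, lives in device memory:
//
//   GemmGrouped::Arguments args(
//     d_problem_sizes,          // GemmCoord[num_groups]
//     num_groups,               // problem_count
//     threadblock_count,        // size of the persistent grid, e.g. occupancy * SM count
//     {alpha, beta},            // EpilogueOutputOp::Params
//     d_ptr_A, d_ptr_B, d_ptr_C, d_ptr_D,   // per-problem operand pointers
//     d_lda, d_ldb, d_ldc, d_ldd,           // per-problem leading dimensions
//     h_problem_sizes);                     // optional host copy (device-level operator only)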
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
typename ProblemVisitor::Params problem_visitor{};
int threadblock_count{0};
typename EpilogueOutputOp::Params output_op{};
ElementA ** ptr_A{nullptr};
ElementB ** ptr_B{nullptr};
ElementC ** ptr_C{nullptr};
ElementC ** ptr_D{nullptr};
typename LayoutA::Stride::LongIndex *lda{nullptr};
typename LayoutB::Stride::LongIndex *ldb{nullptr};
typename LayoutC::Stride::LongIndex *ldc{nullptr};
typename LayoutC::Stride::LongIndex *ldd{nullptr};
//
// Methods
//
Params() = default;
CUTLASS_HOST_DEVICE
Params(Arguments const &args,
void *workspace = nullptr,
int tile_count = 0):
problem_visitor(args.problem_sizes, args.problem_count, workspace, tile_count),
threadblock_count(args.threadblock_count),
output_op(args.output_op),
ptr_A(args.ptr_A),
ptr_B(args.ptr_B),
ptr_C(args.ptr_C),
ptr_D(args.ptr_D),
lda(args.lda),
ldb(args.ldb),
ldc(args.ldc),
ldd(args.ldd)
{
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr,
int tile_count = 0) {
problem_visitor = typename ProblemVisitor::Params(args.problem_sizes, args.problem_count,
workspace, tile_count);
threadblock_count = args.threadblock_count;
output_op = args.output_op;
ptr_A = args.ptr_A;
ptr_B = args.ptr_B;
ptr_C = args.ptr_C;
ptr_D = args.ptr_D;
lda = args.lda;
ldb = args.ldb;
ldc = args.ldc;
ldd = args.ldd;
}
};
/// Shared memory storage structure
struct SharedStorage {
union {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
} kernel;
// ProblemVisitor shared storage can't be overlapped with others
typename ProblemVisitor::SharedStorage problem_visitor;
};
public:
//
// Methods
//
CUTLASS_DEVICE
GemmGrouped() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) {
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
//
// These types shadow the type-level definitions and support the ability to implement
// a 'transposed' GEMM that computes the transposed problems.
//
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
//
// Problem visitor.
//
ProblemVisitor problem_visitor(
params.problem_visitor,
shared_storage.problem_visitor,
blockIdx.x);
// Outer 'persistent' loop to iterate over tiles
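// The launch uses threadblock_count blocks rather than one block per tile; each block keeps
// requesting tiles (spanning all problems in the group) from the visitor and advances its
// claim by gridDim.x at the bottom of the loop, grid-stride style, until no tiles remain.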
while (problem_visitor.next_tile()) {
GemmCoord problem_size = problem_visitor.problem_size();
int32_t problem_idx = problem_visitor.problem_index();
int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx());
GemmCoord grid_shape = problem_visitor.grid_shape(problem_size);
cutlass::gemm::GemmCoord threadblock_offset(
int(threadblock_idx / grid_shape.n()) * Mma::Shape::kM,
int(threadblock_idx % grid_shape.n()) * Mma::Shape::kN,
0);
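// Illustration (hypothetical sizes): with Mma::Shape::kM = kN = 128 and grid_shape = (4, 3, 1)
// tiles, threadblock_idx = 7 maps to tile (7 / 3, 7 % 3) = (2, 1), i.e. output element
// offset (256, 128).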
// Load element pointers. Exchange pointers and strides if working on the transpose
ElementA *ptr_A = reinterpret_cast<ElementA *>((kTransposed ? params.ptr_B[problem_idx] : params.ptr_A[problem_idx]));
typename LayoutA::LongIndex ldm_A = (kTransposed ? params.ldb[problem_idx] : params.lda[problem_idx]);
ElementB *ptr_B = reinterpret_cast<ElementB *>((kTransposed ? params.ptr_A[problem_idx] : params.ptr_B[problem_idx]));
typename LayoutB::LongIndex ldm_B = (kTransposed ? params.lda[problem_idx] : params.ldb[problem_idx]);
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_offset.m(),
0,
};
cutlass::MatrixCoord tb_offset_B{
0,
threadblock_offset.n()
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
LayoutA(ldm_A),
ptr_A,
{problem_size.m(), problem_size.k()},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
LayoutB(ldm_B),
ptr_B,
{problem_size.k(), problem_size.n()},
thread_idx,
tb_offset_B);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Matrix multiply phase
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.kernel.main_loop, thread_idx, warp_idx, lane_idx);
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Wait for all threads to finish their epilogue phases from the previous tile.
__syncthreads();
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
ElementC *ptr_C = params.ptr_C[problem_idx];
ElementC *ptr_D = params.ptr_D[problem_idx];
LayoutC layout_C(params.ldc[problem_idx]);
LayoutC layout_D(params.ldd[problem_idx]);
typename Epilogue::OutputTileIterator::Params params_C(layout_C);
typename Epilogue::OutputTileIterator::Params params_D(layout_D);
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params_C,
ptr_C,
problem_size.mn(),
thread_idx,
threadblock_offset.mn()
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params_D,
ptr_D,
problem_size.mn(),
thread_idx,
threadblock_offset.mn()
);
Epilogue epilogue(
shared_storage.kernel.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
// Next tile
problem_visitor.advance(gridDim.x);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/gemm_grouped.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm_grouped.h",
"repo_id": "include",
"token_count": 5488
} | 28 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/barrier.h"
#include "cutlass/block_striped.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock mapping function
>
struct GemmUniversalStreamk {
public:
//
// Types and constants
//
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
/// The per-thread tile of raw accumulators
using AccumulatorTile = typename Mma::FragmentC;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Workspace bytes per thread block
static size_t const kWorkspaceBytesPerBlock =
__NV_STD_MAX(
kThreadCount * sizeof(AccumulatorTile),
Epilogue::kWorkspaceBytesPerBlock);
/// Block-striped reduction utility
using BlockStripedReduceT = BlockStripedReduce<kThreadCount, AccumulatorTile>;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode = GemmUniversalMode::kGemm;
GemmCoord problem_size {};
int batch_count {1}; // Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor
typename EpilogueOutputOp::Params epilogue{};
void const * ptr_A = nullptr;
void const * ptr_B = nullptr;
void const * ptr_C = nullptr;
void * ptr_D = nullptr;
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
int64_t batch_stride_C{0};
int64_t batch_stride_D{0};
typename LayoutA::Stride stride_a{0};
typename LayoutB::Stride stride_b{0};
typename LayoutC::Stride stride_c{0};
typename LayoutC::Stride stride_d{0};
typename LayoutA::Stride::LongIndex lda{0};
typename LayoutB::Stride::LongIndex ldb{0};
typename LayoutC::Stride::LongIndex ldc{0};
typename LayoutC::Stride::LongIndex ldd{0};
int avail_sms{-1}; /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling)
//
// Methods
//
/// Default Constructor
Arguments() = default;
/// Constructor
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_split, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K)
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride stride_a,
typename LayoutB::Stride stride_b,
typename LayoutC::Stride stride_c,
typename LayoutC::Stride stride_d,
int avail_sms = -1 /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling)
):
mode(mode),
problem_size(problem_size),
batch_count(batch_split),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D),
stride_a(stride_a), stride_b(stride_b), stride_c(stride_c), stride_d(stride_d), avail_sms(avail_sms)
{
CUTLASS_TRACE_HOST("GemmUniversalStreamk::Arguments::Arguments() - problem_size: " << problem_size);
}
/// Constructor
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_split, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K)
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride::LongIndex lda,
typename LayoutB::Stride::LongIndex ldb,
typename LayoutC::Stride::LongIndex ldc,
typename LayoutC::Stride::LongIndex ldd,
int avail_sms = -1 /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling)
):
mode(mode),
problem_size(problem_size),
batch_count(batch_split),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), avail_sms(avail_sms)
{
stride_a = make_Coord(lda);
stride_b = make_Coord(ldb);
stride_c = make_Coord(ldc);
stride_d = make_Coord(ldd);
CUTLASS_TRACE_HOST("GemmUniversalStreamk::Arguments::Arguments() - problem_size: " << problem_size);
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const
{
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.stride_a, args.stride_b);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
/// Parameters structure
struct Params
{
public:
//
// Data members
//
void * ptr_A = nullptr;
void * ptr_B = nullptr;
typename Mma::IteratorA::Params params_A{};
typename Mma::IteratorB::Params params_B{};
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
GemmUniversalMode mode = GemmUniversalMode::kGemm;
ThreadblockSwizzle block_mapping{};
void *barrier_workspace = nullptr;
void *partials_workspace = nullptr;
typename EpilogueOutputOp::Params output_op{};
void * ptr_D = nullptr;
void * ptr_C = nullptr;
typename Epilogue::OutputTileIterator::Params params_D{};
typename Epilogue::OutputTileIterator::Params params_C{};
int64_t batch_stride_D{0};
int64_t batch_stride_C{0};
protected:
//
// Host-only dispatch-utilities
//
/// Pad the given allocation size up to the nearest cache line
static size_t cacheline_align_up(size_t size)
{
static const int CACHELINE_SIZE = 128;
return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE;
}
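// e.g. cacheline_align_up(300) == 384, while sizes that are already multiples of 128 bytes
// (cacheline_align_up(256) == 256) are returned unchanged.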
/// Get the workspace size needed for barrier
size_t get_barrier_workspace_size() const
{
// For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction,
// each reduction block needs its own synchronization flag.
int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region();
int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks);
return cacheline_align_up(sizeof(typename Barrier::T) * num_flags);
}
/// Get the workspace size needed for intermediate partial sums
size_t get_partials_workspace_size() const
{
int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region();
return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks);
}
public:
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a),
params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b),
params_C(args.ldc ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldc) : args.stride_c),
params_D(args.ldd ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldd) : args.stride_d),
output_op(args.epilogue),
mode(args.mode),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(args.ptr_D),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_D(args.batch_stride_D),
barrier_workspace(nullptr),
partials_workspace(nullptr)
{
// Number of SMs to make available for StreamK decomposition
int avail_sms = (args.avail_sms == -1) ?
device_sms :
fast_min(args.avail_sms, device_sms);
// Initialize the block mapping structure
block_mapping = ThreadblockSwizzle(
args.mode,
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count,
sm_occupancy,
device_sms,
avail_sms,
sizeof(ElementA),
sizeof(ElementB),
sizeof(ElementC),
Epilogue::kAccumulatorFragments);
}
/// Returns the workspace size (in bytes) needed for these parameters
size_t get_workspace_size() const
{
return
get_barrier_workspace_size() +
get_partials_workspace_size();
}
/// Assign and initialize the specified workspace buffer. Assumes
/// the memory allocated to workspace is at least as large as get_workspace_size().
Status init_workspace(
void *workspace,
cudaStream_t stream = nullptr)
{
uint8_t *ptr = static_cast<uint8_t*>(workspace);
// Establish partials workspace
partials_workspace = nullptr;
size_t partials_workspace_bytes = get_partials_workspace_size();
if (partials_workspace_bytes > 0)
{
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
partials_workspace = ptr;
ptr += partials_workspace_bytes;
}
// Establish barrier workspace
barrier_workspace = nullptr;
size_t barrier_workspace_bytes = get_barrier_workspace_size();
if (barrier_workspace_bytes > 0)
{
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
barrier_workspace = ptr;
ptr += barrier_workspace_bytes;
}
// Zero-initialize barrier workspace
if (barrier_workspace)
{
size_t barrier_workspace_bytes = get_barrier_workspace_size();
CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes");
cudaError_t result = cudaMemsetAsync(
barrier_workspace,
0,
barrier_workspace_bytes,
stream);
if (result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Returns the GEMM volume in thread block tiles
cutlass::gemm::GemmCoord get_tiled_shape() const
{
return block_mapping.tiled_shape();
}
/// Returns the total number of thread blocks to launch
int get_grid_blocks() const
{
dim3 grid_dims = get_grid_dims();
return grid_dims.x * grid_dims.y * grid_dims.z;
}
/// Returns the grid extents in thread blocks to launch
dim3 get_grid_dims() const
{
return block_mapping.get_grid_dims();
}
/// Lightweight update given a subset of arguments.
void update(Arguments const &args)
{
CUTLASS_TRACE_HOST("GemmUniversalStreamK::Params::update()");
// Update input/output pointers
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
batch_stride_C = args.batch_stride_C;
batch_stride_D = args.batch_stride_D;
output_op = args.epilogue;
}
};
/// Tile work descriptor
struct TileWorkDesc
{
/// The linear tile index
int tile_idx;
/// The location of this tile (in threadblock-tile coordinates) in the output matrix
cutlass::gemm::GemmCoord tiled_coord;
// The first global-scoped MAC-iteration this threadblock will perform for this tile
int iter_begin;
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
int k_begin;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
int k_end;
/// The number of remaining MAC-iterations this threadblock will perform for this tile
int k_iters_remaining;
// Whether this block will perform the first iteration of this tile
CUTLASS_DEVICE
bool tile_started()
{
return (k_begin == 0);
}
// Whether this block will perform the last iteration of this tile
CUTLASS_DEVICE
bool tile_finished(Params const ¶ms)
{
return (k_end == params.block_mapping.problem_size.k());
}
};
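// Illustrative stream-K split (hypothetical numbers): suppose iters_per_tile() == 8, the
// problem k extent is 8 * Mma::Shape::kK, and tile 2 (global MAC-iterations [16, 24)) is
// covered by two SK blocks, one owning [16, 21) and one owning [21, 24). init_sk_tile_work
// then yields
//   block owning [16,21): k_begin = 0, k_end = 5 * Mma::Shape::kK -> tile_started() == true
//   block owning [21,24): k_begin = 5 * Mma::Shape::kK, k_end = K -> tile_finished() == true
// In the cooperative-reduction path, only the finishing block runs the epilogue, after picking
// up the other block's partial accumulators from the workspace.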
/// Shared memory storage structure
union SharedStorage
{
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
protected:
//
// Data members
//
/// GEMM problem parameters
Params params;
/// Shared storage reference
SharedStorage &shared_storage;
/// ID within the threadblock
int thread_idx;
/// ID of warp
int warp_idx;
/// ID of each thread within a warp
int lane_idx;
/// Threadblock scoped epilogue
Epilogue epilogue;
public:
//
// Host-only dispatch API
//
/// Determines whether the GEMM problem size satisfies this kernel's
/// alignment requirements
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size)
{
CUTLASS_TRACE_HOST("GemmUniversalStreamk::can_implement()");
static int const kAlignmentA = (platform::is_same<LayoutA,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutA,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (platform::is_same<LayoutB,
layout::RowMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutB,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Epilogue::OutputTileIterator::kElementsPerAccess;
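// Example (assuming an 8-element vectorized access, i.e. kAlignmentA == 8): for row-major A
// the check below requires problem_size.k() to be a multiple of 8, so K = 1000 passes while
// K = 1001 is rejected as misaligned.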
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
/// Determines whether the GEMM problem satisfies this kernel's
/// alignment requirements
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
protected:
//
// Device-only utility methods
//
/// Iterator for fetching tile fragments from A
CUTLASS_DEVICE
typename Mma::IteratorA init_iterator_A(
TileWorkDesc &tile_work,
GemmUniversalMode mode)
{
// The input A matrix
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
// Update input pointers based on batched/array mode
if (mode == GemmUniversalMode::kBatched) {
ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A;
}
if (mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[tile_work.tiled_coord.k()];
}
int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM;
int m_end = params.block_mapping.problem_size.m();
return typename Mma::IteratorA(
params.params_A,
ptr_A,
{ m_end, tile_work.k_end },
threadIdx.x,
{ m_begin, tile_work.k_begin });
}
/// Iterator for fetching tile fragments from B
CUTLASS_DEVICE
typename Mma::IteratorB init_iterator_B(
TileWorkDesc &tile_work,
GemmUniversalMode mode)
{
// The input B matrix
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
// Update input pointers based on batched/array mode
if (mode == GemmUniversalMode::kBatched) {
ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B;
}
if (mode == GemmUniversalMode::kArray) {
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[tile_work.tiled_coord.k()];
}
int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN;
int n_end = params.block_mapping.problem_size.n();
return typename Mma::IteratorB(
params.params_B,
ptr_B,
{ tile_work.k_end, n_end },
threadIdx.x,
{ tile_work.k_begin, n_begin });
}
CUTLASS_DEVICE
void init_dp_tile_work(
TileWorkDesc &tile_work,
int tile_idx)
{
// The linear tile index
tile_work.tile_idx = tile_idx;
// The first global-scoped MAC-iteration this threadblock will perform for this tile
tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile();
// The number of MAC-iterations this threadblock will perform for this tile
tile_work.k_iters_remaining = params.block_mapping.iters_per_tile();
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_begin = 0;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_end = params.block_mapping.problem_size.k();
// The location of this tile (in threadblock-tile coordinates) in the output matrix
tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
}
CUTLASS_DEVICE
void init_sk_tile_work(
TileWorkDesc &tile_work,
int tile_idx,
int block_iter_begin,
int block_iter_end)
{
// The linear tile index
tile_work.tile_idx = tile_idx;
// The first global-scoped MAC-iteration for this tile
int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile();
// The first global-scoped MAC-iteration this threadblock will perform for this tile
tile_work.iter_begin = max(block_iter_begin, tile_iter_begin);
// The first tile-scoped MAC-iteration this threadblock will perform for this tile
int k_iter_begin = tile_work.iter_begin - tile_iter_begin;
// The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile
int k_iter_end = block_iter_end - tile_iter_begin;
// The number of MAC-iterations this threadblock will perform for this tile
tile_work.k_iters_remaining = k_iter_end - k_iter_begin;
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_begin = k_iter_begin * Mma::Shape::kK;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_end = min(
params.block_mapping.problem_size.k(), // extent of k domain
(k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment
// The location of this tile (in threadblock-tile coordinates) in the output matrix
tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
}
/// Share accumulators with peers
CUTLASS_DEVICE
void share_accumulators(
AccumulatorTile const &accumulator_tile,
int block_idx,
int first_block_idx)
{
AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);
int accum_tile_offset = first_block_idx * kThreadCount;
if (block_idx == first_block_idx)
{
// First peer initializes the workspace partials
BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
}
else
{
// Subsequent peers atomically accumulate into the workspace partials
if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic)
{
// Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them
Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1);
}
else
{
// Turnstile reduction order: wait until the previous peer has written
int wait_count = block_idx - first_block_idx;
Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count);
}
// Perform reduction in workspace
BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
}
// Signal our arrival
Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx);
}
/// Acquire accumulators from peers
CUTLASS_DEVICE
void acquire_accumulators(
AccumulatorTile &accumulator_tile,
int block_idx,
int first_block_idx)
{
AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);
// Wait for arrival
int num_carry_in = block_idx - first_block_idx;
Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in);
// Load and add peer-partials accumulator tile to local accumulator tile
int accum_tile_offset = first_block_idx * kThreadCount;
BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx);
}
/// Perform epilogue computations and output
CUTLASS_DEVICE
void do_epilogue(
TileWorkDesc &tile_work,
AccumulatorTile &accumulator_tile)
{
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
// Update pointers for batched/array mode(s)
if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += tile_work.tiled_coord.k() * params.batch_stride_C;
ptr_D += tile_work.tiled_coord.k() * params.batch_stride_D;
}
if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[tile_work.tiled_coord.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[tile_work.tiled_coord.k()];
}
// Location of this tile in item-coords
MatrixCoord threadblock_item_begin(
tile_work.tiled_coord.m() * Mma::Shape::kM,
tile_work.tiled_coord.n() * Mma::Shape::kN
);
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Execute the epilogue operator to update the destination tensor.
epilogue(
EpilogueOutputOp(params.output_op),
iterator_D,
accumulator_tile,
iterator_C);
}
CUTLASS_DEVICE
void separate_reduction(int reduce_idx)
{
int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx;
// Reduce by sk-tile (every tile contributed to by one or more blocks)
reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments;
reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments;
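// Example (hypothetical): with Epilogue::kAccumulatorFragments == 4, reduce_idx == 9 maps to
// reduce_tile_idx == 2 and reduce_fragment_idx == 1, i.e. this block reduces fragment 1 of
// SK tile 2.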
int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile();
int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1;
peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first);
peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last);
// Wait for peers to complete
int peer_idx_end = peer_idx_last + 1;
int num_peers = peer_idx_end - peer_idx_begin;
Barrier::wait_eq_reset(
params.barrier_workspace,
thread_idx,
(reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx,
num_peers);
// The location of this tile (in threadblock-tile coordinates) in the output matrix
GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx);
// Location of this tile in item-coords
MatrixCoord threadblock_item_begin(
tiled_coord.m() * Mma::Shape::kM,
tiled_coord.n() * Mma::Shape::kN
);
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Execute the epilogue operator to update the destination tensor.
epilogue.reduce(
peer_idx_begin,
peer_idx_end,
reduce_fragment_idx,
params.partials_workspace,
EpilogueOutputOp(params.output_op),
iterator_D,
iterator_C);
}
CUTLASS_DEVICE
void process_tile(
TileWorkDesc tile_work,
int block_idx,
int dp_start_block_idx,
int block_iter_begin)
{
// Initialize input iterators
typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode);
typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode);
// Initialize accumulators
AccumulatorTile accumulator_tile;
accumulator_tile.clear();
// Initialize MMA abstraction
Mma mma(
shared_storage.main_loop,
thread_idx,
warp_idx,
lane_idx);
// Perform this tile's range of multiply-accumulate (MAC) iterations
mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile);
if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) ||
(params.block_mapping.reduction_blocks == 0) ||
(block_idx >= dp_start_block_idx))
{
//
// Cooperative SK peer reduction or DP block
//
int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx);
if (!tile_work.tile_finished(params)) {
// Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace
share_accumulators(accumulator_tile, block_idx, first_block_idx);
}
else
{
// DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile
if (!tile_work.tile_started())
{
// A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks
acquire_accumulators(accumulator_tile, block_idx, first_block_idx);
}
do_epilogue(tile_work, accumulator_tile);
}
}
else
{
//
// Separate peer reduction
//
// Share accumulator partial sums with peer threadblock(s) through scratch workspace
epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started());
// Signal arrival
Barrier::arrive_range_inc(
params.barrier_workspace,
thread_idx,
tile_work.tile_idx * Epilogue::kAccumulatorFragments,
Epilogue::kAccumulatorFragments);
}
}
/// Executes one GEMM
CUTLASS_DEVICE
void gemm()
{
// Initialize block's iteration range
int tile_idx = 0;
int block_iter_begin = 0;
int block_iters_remaining = 0;
int block_idx = params.block_mapping.get_block_idx();
int sk_padding_start_block_idx = params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region();
int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms;
int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks;
int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks;
// Initialize tile work descriptor
TileWorkDesc tile_work;
bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx);
bool sk_block = (block_idx < sk_padding_start_block_idx);
bool reduce_block = (block_idx >= reduce_start_block_idx) &&
(block_idx < grid_padding_start_block_idx) &&
(ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed);
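// Illustrative block-index map (hypothetical sizes): with 6 SK blocks, sk_waves * avail_sms == 8,
// dp_blocks == 20 and reduction_blocks == 4, blocks [0, 6) do SK work, [6, 8) are SK-wave
// padding and return immediately, [8, 28) are data-parallel blocks, [28, 32) are reduction
// blocks (kMixed strategy only), and anything beyond is grid padding that also returns.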
if (dp_block)
{
// This is a DP block
int dp_block_idx = block_idx - dp_start_block_idx;
int first_dp_tile = (params.block_mapping.cohort_raster) ? 0 : params.block_mapping.sk_tiles;
// Blocks in first DP wave get configured number of tiles
tile_idx = first_dp_tile + dp_block_idx;
int tile_allotment = params.block_mapping.dp_first_wave_tiles;
// Blocks in subsequent DP waves get 1 tile
if (dp_block_idx >= params.block_mapping.avail_sms) {
tile_allotment = 1;
tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms;
}
block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allotment;
init_dp_tile_work(tile_work, tile_idx);
// DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1)
if ((tile_idx < params.block_mapping.sk_tiles) ||
(tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) ||
(tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n()))
{
return;
}
}
else if (sk_block)
{
// This is a SK block
int block_iter_end;
params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end);
block_iters_remaining = block_iter_end - block_iter_begin;
tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1);
init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining);
}
else
{
if (reduce_block)
{
// This is a reduction threadblock
int reduce_block_idx = block_idx - reduce_start_block_idx;
separate_reduction(reduce_block_idx);
}
return;
}
// Iteration-processing loop body
CUTLASS_PRAGMA_NO_UNROLL
while (true)
{
// Perform this block's share of work for this tile
process_tile(
tile_work,
block_idx,
dp_start_block_idx,
block_iter_begin);
block_iters_remaining -= tile_work.k_iters_remaining;
if (block_iters_remaining == 0)
{
break;
}
// Continue to next tile
__syncthreads();
if (block_idx >= dp_start_block_idx)
{
// DP blocks consume their tiles at a stride of avail_sms
tile_idx += params.block_mapping.avail_sms;
init_dp_tile_work(tile_work, tile_idx);
}
else
{
// SK blocks consume their tiles in backwards order
tile_idx--;
init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining);
}
}
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmUniversalStreamk op(params, shared_storage);
op();
}
// Constructor
CUTLASS_DEVICE
GemmUniversalStreamk(
Params const ¶ms,
SharedStorage &shared_storage)
:
params(params),
shared_storage(shared_storage),
thread_idx(threadIdx.x),
warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)), // broadcast the warp_id computed by lane 0 to ensure dependent code is compiled as warp-uniform
lane_idx(threadIdx.x % 32),
epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx)
{}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()()
{
// Generic SK code path
gemm();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/gemm_universal_streamk.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm_universal_streamk.h",
"repo_id": "include",
"token_count": 15874
} | 29 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/kernel_hardware_info.hpp"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cute/tensor.hpp"
namespace cutlass::gemm::kernel {
///////////////////////////////////////////////////////////////////////////////
template <
class ProblemShape_,
class CollectiveMainloop_,
class CollectiveEpilogue_,
class TileScheduler_
>
class GemmUniversal<
ProblemShape_,
CollectiveMainloop_,
CollectiveEpilogue_,
TileScheduler_,
cute::enable_if_t<cute::is_base_of_v<KernelMultistage, typename CollectiveMainloop_::DispatchPolicy::Schedule>>>
{
public:
//
// Type Aliases
//
using ProblemShape = ProblemShape_;
static_assert(rank(ProblemShape{}) == 3 or rank(ProblemShape{}) == 4,
"ProblemShape{} should be <M,N,K> or <M,N,K,L>");
// Mainloop derived types
using CollectiveMainloop = CollectiveMainloop_;
using TileShape = typename CollectiveMainloop::TileShape;
using TiledMma = typename CollectiveMainloop::TiledMma;
using ArchTag = typename CollectiveMainloop::ArchTag;
using ElementA = typename CollectiveMainloop::ElementA;
using StrideA = typename CollectiveMainloop::StrideA;
using ElementB = typename CollectiveMainloop::ElementB;
using StrideB = typename CollectiveMainloop::StrideB;
using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy;
using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator;
using MainloopArguments = typename CollectiveMainloop::Arguments;
using MainloopParams = typename CollectiveMainloop::Params;
using TileSchedulerTag = TileScheduler_;
using TileScheduler = typename detail::TileSchedulerSelector<
TileScheduler_, ArchTag, TileShape,
cute::Shape<cute::Int<1>, cute::Int<1>, cute::Int<1>>>::Scheduler;
using TileSchedulerArguments = typename TileScheduler::Arguments;
static constexpr bool is_valid_tile_scheduler =
cute::is_void_v<TileScheduler_> or cute::is_same_v<TileScheduler_, PersistentScheduler>;
static_assert(is_valid_tile_scheduler, "SM70 kernel does not support specializing the tile scheduler.");
// Epilogue derived types
using CollectiveEpilogue = CollectiveEpilogue_;
using ElementC = typename CollectiveEpilogue::ElementC;
using StrideC = typename CollectiveEpilogue::StrideC;
using ElementD = typename CollectiveEpilogue::ElementD;
using StrideD = typename CollectiveEpilogue::StrideD;
using EpilogueArguments = typename CollectiveEpilogue::Arguments;
using EpilogueParams = typename CollectiveEpilogue::Params;
static_assert(cute::is_same_v<ElementAccumulator, typename CollectiveEpilogue::ElementAccumulator>,
"Mainloop and epilogue do not agree on accumulator value type.");
// MSVC requires the cast to fix a warning-as-error.
static constexpr int SharedStorageSize = static_cast<int>(cute::max(
sizeof(typename CollectiveMainloop::SharedStorage),
sizeof(typename CollectiveEpilogue::SharedStorage)));
static constexpr uint32_t MaxThreadsPerBlock = CUTE_STATIC_V(cute::size(TiledMma{}));
static constexpr uint32_t MinBlocksPerMultiprocessor = 1;
// Device side arguments
struct Arguments {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopArguments mainloop{};
EpilogueArguments epilogue{};
KernelHardwareInfo hw_info{};
TileSchedulerArguments scheduler{};
};
// Kernel entry point API
struct Params {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopParams mainloop{};
EpilogueParams epilogue{};
};
//
// Methods
//
// Convert to underlying arguments. In this case, a simple copy for the aliased type.
static
Params
to_underlying_arguments(Arguments const& args, void* workspace) {
(void) workspace;
KernelHardwareInfo hw_info{args.hw_info.device_id, args.hw_info.sm_count};
auto problem_shape_MNKL = append<4>(args.problem_shape, Int<1>{});
return {
args.mode,
args.problem_shape,
CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace),
CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace)
};
}
static bool
can_implement(Arguments const& args) {
bool mode_implementable = args.mode == GemmUniversalMode::kGemm or
(args.mode == GemmUniversalMode::kBatched && rank(ProblemShape{}) == 4);
return mode_implementable && TileScheduler::can_implement(args.scheduler);
}
static size_t
get_workspace_size(Arguments const& args) {
size_t workspace_size = 0;
return workspace_size;
}
static
cutlass::Status
initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr,
CudaHostAdapter* cuda_adapter = nullptr) {
cutlass::Status status = Status::kSuccess;
return status;
}
static dim3
get_grid_shape(Params const& params) {
int batch_count = 1;
if constexpr (cute::rank(ProblemShape{}) == 4) {
batch_count = cute::size<3>(params.problem_shape);
}
return dim3(
cute::size(cute::ceil_div(cute::shape<0>(params.problem_shape), cute::shape<0>(TileShape{}))),
cute::size(cute::ceil_div(cute::shape<1>(params.problem_shape), cute::shape<1>(TileShape{}))),
batch_count
);
}
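// Illustrative note (not part of the original source): with hypothetical extents, e.g.
// TileShape BLK_M = BLK_N = 128 and a problem of M = 300, N = 200, the grid above is
// (ceil_div(300, 128), ceil_div(200, 128), batch_count) = (3, 2, batch_count); the batch
// dimension stays 1 unless the problem shape is rank-4 (MNKL).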
static dim3
get_block_shape() {
return dim3(MaxThreadsPerBlock, 1, 1);
}
CUTLASS_DEVICE
void
operator()(Params const& params, char* smem_buf) {
using namespace cute;
using X = Underscore;
// Preconditions
CUTE_STATIC_ASSERT(is_static<TileShape>::value);
// Separate out problem shape for convenience
// Optionally append 1s until problem shape is rank-4 in case it is only rank-3 (MNK)
auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{});
auto [M,N,K,L] = problem_shape_MNKL;
// Preconditions
static_assert(cute::rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
// Get the appropriate blocks for this thread block -- potential for thread block locality
int thread_idx = int(threadIdx.x);
auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K)
auto [m_coord, n_coord, l_coord] = static_cast<uint3>(blockIdx);
auto blk_coord_mnkl = make_coord(m_coord, n_coord, _, l_coord); // (m,n,k,l)
// Represent the full tensors
Tensor mA_mkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_A), make_shape(M,K,L), params.mainloop.dA); //(m,k,l)
Tensor mB_nkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_B), make_shape(N,K,L), params.mainloop.dB); //(n,k,l)
// Get batch slice
Tensor mA_mk = mA_mkl(_,_,l_coord); // (m,k)
Tensor mB_nk = mB_nkl(_,_,l_coord); // (n,k)
// Slice to get the tiles this thread block is responsible for
Tensor gA = local_tile(mA_mk, blk_shape, take<0,3>(blk_coord_mnkl), Step<_1, X,_1>{}); // (BLK_M,BLK_K,k)
Tensor gB = local_tile(mB_nk, blk_shape, take<0,3>(blk_coord_mnkl), Step< X,_1,_1>{}); // (BLK_N,BLK_K,k)
// Compute tile residues for predication
auto m_max_coord = M - size<0>(gA) * get<0>(blk_coord_mnkl); // M - BLK_M * m_coord
auto n_max_coord = N - size<0>(gB) * get<1>(blk_coord_mnkl); // N - BLK_N * n_coord
auto k_residue = K - size<1>(gA) * size<2>(gA); // K - BLK_K * k_coord_max
auto residue_mnk = make_tuple(m_max_coord, n_max_coord, k_residue);
// Allocate the tiled_mma and the accumulators for the (M,N) blk_shape
TiledMma tiled_mma;
Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N)
clear(accumulators);
auto k_tile_iter = cute::make_coord_iterator(shape<2>(gA));
int k_tile_count = size<2>(gA);
// Perform the collective scoped MMA
CollectiveMainloop collective_mma;
collective_mma(
accumulators,
gA,
gB,
accumulators,
k_tile_iter, k_tile_count,
residue_mnk,
thread_idx,
smem_buf
);
// Epilogue and write to gD
CollectiveEpilogue epilogue{params.epilogue};
epilogue(
problem_shape_MNKL,
blk_shape,
blk_coord_mnkl,
accumulators,
tiled_mma,
residue_mnk,
thread_idx,
smem_buf
);
}
};
///////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::kernel
| include/cutlass/gemm/kernel/sm70_gemm.hpp/0 | {
"file_path": "include/cutlass/gemm/kernel/sm70_gemm.hpp",
"repo_id": "include",
"token_count": 4143
} | 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma1_, ///! Threadblock-scoped triangular matrix multiply-accumulate (A*B or B*A)
typename Mma2_, ///! Threadblock-scoped triangular matrix multiply-accumulate (AT*B or B*AT)
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
SideMode SideMode_, ///! Side Mode for the kernel (kLeft or kRight)
FillMode FillMode_ ///! Fill Mode for triangular matrix (kLower or kUpper)
>
struct SymmUniversal {
public:
using Mma1 = Mma1_;
using Mma2 = Mma2_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma1::IteratorA::Element;
using ElementB = typename Mma1::IteratorB::Element;
// Mma1 (TRMM - with diagonal: C_tmp = alpha * A * B)
using LayoutA = typename Mma1::IteratorA::Layout;
using LayoutBT = typename Mma1::IteratorB::Layout;
static ComplexTransform const kMma1TransformA = Mma1::kTransformA;
static ComplexTransform const kMma1TransformB = Mma1::kTransformB;
// Mma2 (TRMM - withOUT diagonal: alpha * AT * B)
using LayoutB = typename Mma2::IteratorA::Layout;
using LayoutAT = typename Mma2::IteratorB::Layout;
static ComplexTransform const kMma2TransformA = Mma2::kTransformA;
static ComplexTransform const kMma2TransformB = Mma2::kTransformB;
// Common type definitions for Mma1 and Mma2
using Operator = typename Mma1::Operator;
using OperatorClass = typename Mma1::Operator::OperatorClass;
using ThreadblockShape = typename Mma1::Shape;
using WarpShape = typename Mma1::Operator::Shape;
using InstructionShape = typename Mma1::Policy::Operator::InstructionShape;
using ArchTag = typename Mma1::ArchTag;
static int const kStages = Mma1::kStages;
static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements;
// Output-related type definitions
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static SideMode const kSideModeA = SideMode_;
static FillMode const kFillModeA = FillMode_;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma1::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode = GemmUniversalMode::kGemm;
GemmCoord problem_size{};
int batch_count{1};
typename EpilogueOutputOp::Params epilogue{};
void const * ptr_A{nullptr};
void const * ptr_B{nullptr};
void const * ptr_C{nullptr};
void * ptr_D{nullptr};
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
int64_t batch_stride_C{0};
int64_t batch_stride_D{0};
typename LayoutA::Stride::Index lda{0};
typename LayoutB::Stride::Index ldb{0};
typename LayoutC::Stride::Index ldc{0};
typename LayoutC::Stride::Index ldd{0};
//
// Methods
//
Arguments() = default;
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldc,
typename LayoutC::Stride::Index ldd
):
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B),
batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd) {
}
/// Returns arguments for the transposed problem sizes
Arguments transposed_problem_size() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
return args;
}
/// Returns arguments for the transposed matrices
Arguments swapped_matrices() const {
Arguments args(*this);
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size{};
cutlass::gemm::GemmCoord grid_tiled_shape{};
int swizzle_log_tile{0};
// Mma1 Iterator A and B params
typename Mma1::IteratorA::Params params_A_mma1{};
typename Mma1::IteratorB::Params params_B_mma1{};
// Mma2 Iterator A and B params
typename Mma2::IteratorA::Params params_A_mma2{};
typename Mma2::IteratorB::Params params_B_mma2{};
typename Epilogue::OutputTileIterator::Params params_C{};
typename Epilogue::OutputTileIterator::Params params_D{};
typename EpilogueOutputOp::Params output_op{};
GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm;
int batch_count {0};
int gemm_k_size {0};
void * ptr_A{nullptr};
void * ptr_B{nullptr};
void * ptr_C{nullptr};
void * ptr_D{nullptr};
int64_t batch_stride_A {0};
int64_t batch_stride_B {0};
int64_t batch_stride_C {0};
int64_t batch_stride_D {0};
int *semaphore{nullptr};
//
// Methods
//
Params() = default;
CUTLASS_HOST_DEVICE
Params(
Arguments const &args,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
int gemm_k_size,
void *workspace = nullptr
):
problem_size(args.problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A_mma1(args.lda),
params_B_mma1(args.ldb),
params_A_mma2(args.lda),
params_B_mma2(args.ldb),
params_C(args.ldc),
params_D(args.ldd),
output_op(args.epilogue),
mode(args.mode),
batch_count(args.batch_count),
gemm_k_size(gemm_k_size),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(const_cast<void *>(args.ptr_D)),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_D(args.batch_stride_D),
semaphore(static_cast<int *>(workspace)) {
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr) {
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
output_op = args.epilogue;
semaphore = static_cast<int *>(workspace);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma1::SharedStorage mma1_main_loop;
typename Mma2::SharedStorage mma2_main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Methods
//
CUTLASS_DEVICE
SymmUniversal() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
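// Illustrative note (not part of the original source): the alignment values above come from
// the iterator access types. Assuming, for example, kAlignmentA = kAlignmentB = kAlignmentC = 8
// (typical for 128-bit accesses on 16-bit elements), every extent touched by a check must be
// a multiple of 8: a 512x384x256 problem passes, while 510x384x256 returns
// Status::kErrorMisalignedOperand because 510 % 8 != 0.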
/// Executes the two GEMM stages (Mma1 followed by Mma2)
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_MxK_mma1{
threadblock_tile_offset.m() * Mma1::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_KxN_mma1{
offset_k,
threadblock_tile_offset.n() * Mma1::Shape::kN
};
cutlass::MatrixCoord tb_offset_MxK_mma2{
threadblock_tile_offset.m() * Mma1::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_KxN_mma2{
offset_k,
threadblock_tile_offset.n() * Mma1::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply for Mma1
Mma1 mma1(shared_storage.mma1_main_loop, thread_idx, warp_idx, lane_idx);
// Construct thread-scoped matrix multiply for Mma2
Mma2 mma2(shared_storage.mma2_main_loop, thread_idx, warp_idx, lane_idx);
typename Mma1::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
int gemm_k_iterations_mma1 = gemm_k_iterations;
int gemm_k_iterations_mma2 = gemm_k_iterations;
/******************************************************************************************************
* SYMM (Side Mode, Fill Mode) is made of two TRMMs:
First TRMM (Mma1: Side Mode, Fill Mode, Non-Unit Diag): (A * B) or (B * A)
Second TRMM (Mma2: Side Mode, Inverted Fill Mode, Unit Diag): (AT * B) or (B * AT)
* For the first TRMM (Mma1) of SYMM, the following method is used to calculate the k-iterations:
First two cases: (Left Side, Lower Fill) and (Right Side, Upper Fill) are transpose of each other
- (Left Side, Lower Fill): calculate bottom of the CTA tile, then find the k-iterations
needed to process all elements till that coordinate.
- (Right Side, Upper Fill): calculate right end of the CTA tile, then find the k-iterations
needed to process all elements till that coordinate.
Last two cases: (Left Side, Upper Fill) and (Right Side, Lower Fill) are transpose of each other
- (Left Side, Upper Fill): calculate the top of the CTA tile, then find k-iterations
that can be skipped for all elements of this tile.
- (Right Side, Lower Fill): calculate the left start of the CTA tile, then find k-iterations
that can be skipped for all elements of this tile.
* For the second TRMM (Mma2) of SYMM, the k-iterations and threadblock offsets are calculated
the same way as the first TRMM (Mma1) of same side mode but with inverted fill mode.
For example, if the first TRMM is left sided with lower fill, the second TRMM would be
left sided with upper fill.
********************************************************************************************************/
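// Illustrative note (not part of the original source): a hedged numeric trace of the
// (Left Side, Lower Fill) case handled below, assuming Mma1::Shape::kM = 128,
// Mma1::Shape::kK = 32, threadblock_tile_offset.m() = 2, and gemm_k_iterations = 16.
// Mma1: k_iterations_till_diagonal_mma1 = (3 * 128 + 31) / 32 = 12 < 16, so the first TRMM
// runs only 12 k-iterations; for lower fill, columns of the triangular operand past the
// tile's bottom row (384) are zero and can be skipped.
// Mma2: k_iterations_till_diagonal_mma2 = (2 * 128) / 32 = 8, so the second TRMM advances
// both tile offsets by 8 * 32 = 256 along K and runs the remaining 16 - 8 = 8 iterations.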
if (kSideModeA == SideMode::kLeft && kFillModeA == FillMode::kLower) {
int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma1 < gemm_k_iterations) {
gemm_k_iterations_mma1 = k_iterations_till_diagonal_mma1;
}
int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.m()) * Mma1::Shape::kM) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma2 != 0) {
tb_offset_MxK_mma2 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma2 * Mma1::Shape::kK});
tb_offset_KxN_mma2 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma2 * Mma1::Shape::kK, 0});
gemm_k_iterations_mma2 -= k_iterations_till_diagonal_mma2;
}
} else if (kSideModeA == SideMode::kRight && kFillModeA == FillMode::kUpper) {
int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.n() + 1) * Mma1::Shape::kN + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma1 < gemm_k_iterations) {
gemm_k_iterations_mma1 = k_iterations_till_diagonal_mma1;
}
int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.n()) * Mma1::Shape::kN) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma2 != 0) {
tb_offset_MxK_mma2 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma2 * Mma1::Shape::kK});
tb_offset_KxN_mma2 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma2 * Mma1::Shape::kK, 0});
gemm_k_iterations_mma2 -= k_iterations_till_diagonal_mma2;
}
} else if (kSideModeA == SideMode::kLeft && kFillModeA == FillMode::kUpper) {
int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.m()) * Mma1::Shape::kM) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma1 != 0) {
tb_offset_MxK_mma1 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma1 * Mma1::Shape::kK});
tb_offset_KxN_mma1 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma1 * Mma1::Shape::kK, 0});
gemm_k_iterations_mma1 -= k_iterations_till_diagonal_mma1;
}
int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma2 < gemm_k_iterations) {
gemm_k_iterations_mma2 = k_iterations_till_diagonal_mma2;
}
} else if (kSideModeA == SideMode::kRight && kFillModeA == FillMode::kLower) {
int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.n()) * Mma1::Shape::kN) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma1 != 0) {
tb_offset_MxK_mma1 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma1 * Mma1::Shape::kK});
tb_offset_KxN_mma1 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma1 * Mma1::Shape::kK, 0});
gemm_k_iterations_mma1 -= k_iterations_till_diagonal_mma1;
}
int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.n() + 1) * Mma1::Shape::kN + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma2 < gemm_k_iterations) {
gemm_k_iterations_mma2 = k_iterations_till_diagonal_mma2;
}
}
// Construct iterators to A and B operands for Mma1
typename Mma1::IteratorA iterator_A_mma1(
params.params_A_mma1,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK_mma1);
typename Mma1::IteratorB iterator_B_mma1(
params.params_B_mma1,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_KxN_mma1);
// Construct iterators to A and B operands for Mma2
typename Mma2::IteratorA iterator_A_mma2(
params.params_A_mma2,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK_mma2);
typename Mma2::IteratorB iterator_B_mma2(
params.params_B_mma2,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_KxN_mma2);
// Compute threadblock-scoped matrix multiply-add (A x B) or (B x A)
mma1(
gemm_k_iterations_mma1,
accumulators,
iterator_A_mma1,
iterator_B_mma1,
accumulators);
// Compute threadblock-scoped matrix multiply-add (AT x B) or (B x AT)
mma2(
gemm_k_iterations_mma2,
accumulators,
iterator_A_mma2,
iterator_B_mma2,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma1::Shape::kM,
threadblock_tile_offset.n() * Mma1::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
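// Illustrative note (not part of the original source): for serial split-K (mode == kGemm,
// grid_tiled_shape.k() == 3) and assuming the host zero-initializes the semaphore workspace
// (as the device-level wrappers typically do), the CTAs covering one output tile proceed as:
// k = 0 waits on 0 and releases 1; k = 1 waits on 1, sources its "C" from the partial D
// written by k = 0, and releases 2; k = 2 waits on 2, writes the final tile, and releases 0,
// resetting the semaphore for a subsequent launch.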
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/symm_universal.h/0 | {
"file_path": "include/cutlass/gemm/kernel/symm_universal.h",
"repo_id": "include",
"token_count": 9434
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming
expectations about data layout of the global memory fragments, data types,
and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting sparse
TensorOp instructions.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/gemm/warp/default_mma_sparse_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h"
#include "cutlass/gemm/threadblock/mma_sparse_multistage.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Template defining default matrix multiply operators inferred from threadblock tile size,
/// global memory data layout, and target math instruction.
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator = typename platform::conditional<
(platform::is_same<OperatorClass,
cutlass::arch::OpClassTensorOp>::value) &&
(platform::is_same<ElementA, int8_t>::value ||
platform::is_same<ElementA, int4b_t>::value ||
platform::is_same<ElementA, uint8_t>::value ||
platform::is_same<ElementA, uint4b_t>::value),
cutlass::arch::OpMultiplyAddSaturate,
cutlass::arch::OpMultiplyAdd>::type,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false
/// Cache operation of operand A
, cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global
>
struct DefaultSparseMmaCore;
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultSparseMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, Stages,
Operator_, false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static int const kSparse = 2;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
platform::min(Shape::kM / (kAccessSizeInBits / sizeof_bits<ElementA>::value), 8);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
static int const kWarpThreadArrangementContiguousB =
platform::min(Shape::kN / (kAccessSizeInBits / sizeof_bits<ElementB>::value), 8);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
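// Illustrative note (not part of the original source): with hypothetical template parameters
// Shape = 128x128x64, WarpShape = 64x64x64, and 16-bit A/B elements, WarpCount is <2, 2, 1>
// and kThreads = 4 * 32 = 128. A 128-bit access moves 128 / 16 = 8 elements, so
// kWarpThreadArrangementContiguousA = min(Shape::kM / 8, 8) = 8 and
// kWarpThreadArrangementStridedA = 32 / 8 = 4 (B follows the same arithmetic along Shape::kN).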
//
// Shared memory layouts
//
static int const Crosswise_A = platform::min(int(128 / sizeof(ElementA)),
Shape::kM);
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value, Crosswise_A>;
// Shared memory layout
static int const Crosswise_B = platform::min(int(128 / sizeof(ElementB)),
Shape::kN);
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementB>::value, Crosswise_B>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK / kSparse>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK / kSparse>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Cache operation of operand E
static cutlass::arch::CacheOperation::Kind const kCacheOpE =
cutlass::arch::CacheOperation::Global;
static int const kInterleavedE = MmaTensorOp::kInterleaved;
static int const kMetaSizeInBits = MmaTensorOp::kMetaSizeInBits;
static int const kMaxID2 = MmaTensorOp::kMaxID2;
static int const kElementsPerElementE = MmaTensorOp::kElementsPerElementE;
using ElementE = typename MmaTensorOp::ElementE;
using GmemLayoutE = cutlass::layout::ColumnMajorInterleaved<kInterleavedE>;
// Shared memory layout. Interleaved layout is mapped to PitchLinear layout.
using SmemLayoutE = typename MmaTensorOp::LayoutE;
/// ThreadMap of iterator E
static int const kElementsPerAccessE =
kAccessSizeInBits / sizeof_bits<ElementE>::value;
/// E is tiny. Not all warps are needed.
static int const kThreadsE =
(Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value) >
kThreads)
? kThreads
: (Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value));
using IteratorThreadMapE = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE /
kInterleavedE>,
kThreadsE, kElementsPerAccessE>;
/// Shared memory iterator to E operand
using SmemIteratorE = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE / kInterleavedE>,
ElementE, SmemLayoutE, 0, IteratorThreadMapE>;
/// Policy used to define MmaPipelined
using MmaPolicy =
SparseMmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultSparseMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, Stages,
Operator_, false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static int const kSparse = 2;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / kSparse / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
// crosswise cannot be larger than 1024 bit.
static int const kCrosswiseB =
(Shape::kK > (1024 / sizeof_bits<ElementB>::value))
? (1024 / sizeof_bits<ElementB>::value)
: Shape::kK;
static int const kWarpThreadArrangementContiguousB =
kCrosswiseB / (kAccessSizeInBits / sizeof_bits<ElementB>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
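// Illustrative note (not part of the original source): for 16-bit B elements the crosswise
// cap is 1024 / 16 = 64 elements, so any Shape::kK >= 64 yields kCrosswiseB = 64; with
// 128-bit accesses (8 elements each) that gives kWarpThreadArrangementContiguousB = 64 / 8 = 8
// and a strided dimension of 32 / 8 = 4. For A, with Shape::kK = 64 and kSparse = 2,
// kWarpThreadArrangementContiguousA = 64 / 2 / 8 = 4 and the strided dimension is 32 / 4 = 8.
// These numbers are hypothetical; actual values follow from the instantiated element types.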
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK / kSparse>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, kCrosswiseB>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK / kSparse, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK / kSparse>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Cache operation of operand E
static cutlass::arch::CacheOperation::Kind const kCacheOpE =
cutlass::arch::CacheOperation::Global;
static int const kInterleavedE = MmaTensorOp::kInterleaved;
static int const kMetaSizeInBits = MmaTensorOp::kMetaSizeInBits;
static int const kMaxID2 = MmaTensorOp::kMaxID2;
static int const kElementsPerElementE = MmaTensorOp::kElementsPerElementE;
using ElementE = typename MmaTensorOp::ElementE;
using GmemLayoutE = cutlass::layout::ColumnMajorInterleaved<kInterleavedE>;
// Shared memory layout. Interleaved layout is mapped to PitchLinear layout.
using SmemLayoutE = typename MmaTensorOp::LayoutE;
/// ThreadMap of iterator E
static int const kElementsPerAccessE =
kAccessSizeInBits / sizeof_bits<ElementE>::value;
/// E is tiny. Not all warps are needed.
static int const kThreadsE =
(Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value) >
kThreads)
? kThreads
: (Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value));
using IteratorThreadMapE = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE /
kInterleavedE>,
kThreadsE, kElementsPerAccessE>;
/// Shared memory iterator to E operand
using SmemIteratorE = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE / kInterleavedE>,
ElementE, SmemLayoutE, 0, IteratorThreadMapE>;
/// Policy used to define MmaPipelined
using MmaPolicy =
SparseMmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultSparseMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, Stages,
Operator_, false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static int const kSparse = 2;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const Crosswise_A = platform::min(int(128 / sizeof(ElementA)),
Shape::kM);
static int const kWarpThreadArrangementContiguousA =
platform::min(Shape::kM / (kAccessSizeInBits / sizeof_bits<ElementA>::value), 8);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
// Warp thread arrangement
// crosswise cannot be larger than 1024 bit.
static int const kCrosswiseB =
(Shape::kK > (1024 / sizeof_bits<ElementB>::value))
? (1024 / sizeof_bits<ElementB>::value)
: Shape::kK;
static int const kWarpThreadArrangementContiguousB =
kCrosswiseB / (kAccessSizeInBits / sizeof_bits<ElementB>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value, Crosswise_A>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, kCrosswiseB>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK / kSparse>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK / kSparse>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Cache operation of operand E
static cutlass::arch::CacheOperation::Kind const kCacheOpE =
cutlass::arch::CacheOperation::Global;
static int const kInterleavedE = MmaTensorOp::kInterleaved;
static int const kMetaSizeInBits = MmaTensorOp::kMetaSizeInBits;
static int const kMaxID2 = MmaTensorOp::kMaxID2;
static int const kElementsPerElementE = MmaTensorOp::kElementsPerElementE;
using ElementE = typename MmaTensorOp::ElementE;
using GmemLayoutE = cutlass::layout::ColumnMajorInterleaved<kInterleavedE>;
// Shared memory layout. Interleaved layout is mapped to PitchLinear layout.
using SmemLayoutE = typename MmaTensorOp::LayoutE;
/// ThreadMap of iterator E
static int const kElementsPerAccessE =
kAccessSizeInBits / sizeof_bits<ElementE>::value;
/// E is tiny. Not all warps are needed.
static int const kThreadsE =
(Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value) >
kThreads)
? kThreads
: (Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value));
using IteratorThreadMapE = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE /
kInterleavedE>,
kThreadsE, kElementsPerAccessE>;
/// Shared memory iterator to E operand
using SmemIteratorE = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE / kInterleavedE>,
ElementE, SmemLayoutE, 0, IteratorThreadMapE>;
/// Policy used to define MmaPipelined
using MmaPolicy =
SparseMmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)

typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultSparseMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static int const kSparse = 2;
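  // kSparse = 2 reflects the SM80 2:4 structured-sparsity scheme: only
  // Shape::kK / kSparse elements of A are stored per row, with the nonzero
  // positions encoded in the metadata operand E.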
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / kSparse / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
static int const kWarpThreadArrangementContiguousB =
platform::min(Shape::kN / (kAccessSizeInBits / sizeof_bits<ElementB>::value), 8);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
static int const Crosswise_B = platform::min(int(128 / sizeof(ElementB)),
Shape::kN);
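  // For example, assuming a 16-bit ElementB, Crosswise_B evaluates to
  // min(128 / 2, Shape::kN) = min(64, Shape::kN).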
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK / kSparse>;
// Shared memory layout
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementB>::value, Crosswise_B>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK / kSparse, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK / kSparse>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Cache operation of operand E
static cutlass::arch::CacheOperation::Kind const kCacheOpE =
cutlass::arch::CacheOperation::Global;
static int const kInterleavedE = MmaTensorOp::kInterleaved;
static int const kMetaSizeInBits = MmaTensorOp::kMetaSizeInBits;
static int const kMaxID2 = MmaTensorOp::kMaxID2;
static int const kElementsPerElementE = MmaTensorOp::kElementsPerElementE;
using ElementE = typename MmaTensorOp::ElementE;
using GmemLayoutE = cutlass::layout::ColumnMajorInterleaved<kInterleavedE>;
// Shared memory layout. Interleaved layout is mapped to PitchLinear layout.
using SmemLayoutE = typename MmaTensorOp::LayoutE;
/// ThreadMap of iterator E
static int const kElementsPerAccessE =
kAccessSizeInBits / sizeof_bits<ElementE>::value;
/// E is tiny. Not all warps are needed.
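  /// (Equivalently, kThreadsE = min(kThreads, number of kAccessSizeInBits-wide
  /// accesses needed to cover the threadblock's E tile).)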
static int const kThreadsE =
(Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value) >
kThreads)
? kThreads
: (Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value));
using IteratorThreadMapE = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE /
kInterleavedE>,
kThreadsE, kElementsPerAccessE>;
/// Shared memory iterator to E operand
using SmemIteratorE = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE / kInterleavedE>,
ElementE, SmemLayoutE, 0, IteratorThreadMapE>;
/// Policy used to define MmaPipelined
using MmaPolicy =
SparseMmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h",
"repo_id": "include",
"token_count": 12270
} | 32 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implements several possible threadblock-swizzling functions mapping blockIdx to
GEMM problems.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/platform/platform.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/gemm/threadblock/index_remat.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle_streamk.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for GEMMs
template <int N = 1>
struct GemmIdentityThreadblockSwizzle {
CUTLASS_HOST_DEVICE
GemmIdentityThreadblockSwizzle() { }
/// Returns the shape of the problem in units of logical tiles
/// *Gemm* problem size: gemm(M, N, K)
CUTLASS_HOST_DEVICE
static GemmCoord get_tiled_shape(
GemmCoord problem_size,
GemmCoord tile_size,
int split_k_slices) {
return GemmCoord(
(problem_size.m() + tile_size.m() - 1) / tile_size.m(),
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
split_k_slices);
}
/// Returns the shape of the problem in units of logical tiles
/// *ImplicitGemm* Conv2d problem size: conv_operator(NPQK, NHWC, KRSC)
CUTLASS_HOST_DEVICE
static GemmCoord get_tiled_shape(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem_size,
GemmCoord tile_size,
int split_k_slices) {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
return get_tiled_shape(
implicit_gemm_problem_size, tile_size, split_k_slices);
}
/// Returns the shape of the problem in units of logical tiles
/// *ImplicitGemm* Conv3d problem size: conv_operator(NZPQK, NDHWC, KTRSC)
CUTLASS_HOST_DEVICE
static GemmCoord get_tiled_shape(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv3dProblemSize const &problem_size,
GemmCoord tile_size,
int split_k_slices) {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
return get_tiled_shape(
implicit_gemm_problem_size, tile_size, split_k_slices);
}
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
static dim3 get_grid_shape(GemmCoord tiled_shape) {
int tile = 1 << get_log_tile(tiled_shape);
return dim3(tiled_shape.m() * tile, (tiled_shape.n() + tile - 1) / tile, tiled_shape.k());
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
static int get_log_tile(GemmCoord tiled_shape) {
auto n = tiled_shape.n();
// Thresholds picked so that it doesn't cause too many no-op CTAs
if (N >= 8 && n >= 6)
return 3;
else if (N >= 4 && n >= 3)
return 2;
else if (N >= 2 && n >= 2)
return 1;
else
return 0;
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(int log_tile) {
int block_idx_x = RematerializeBlockIdxX();
int block_idx_y = RematerializeBlockIdxY();
int block_idx_z = RematerializeBlockIdxZ();
return GemmCoord{(block_idx_x >> log_tile), //
(block_idx_y << log_tile) + ((block_idx_x) & ((1 << (log_tile)) - 1)),
block_idx_z};
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(GemmCoord tiled_shape) {
int const kTile = N;
int block_idx_x = RematerializeBlockIdxX();
int block_idx_y = RematerializeBlockIdxY();
if ((tiled_shape.m() < kTile) || (tiled_shape.n() < kTile))
return GemmCoord{block_idx_x, block_idx_y, RematerializeBlockIdxZ()};
return GemmCoord{
(block_idx_x / kTile),
(block_idx_y * kTile) + (block_idx_x % kTile),
RematerializeBlockIdxZ()
};
}
};
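// Usage sketch (illustrative values only, not part of the original header) showing how
// the identity swizzle maps a GEMM problem onto the CUDA grid:
//
//   using Swizzle = GemmIdentityThreadblockSwizzle<8>;
//   GemmCoord problem(1024, 512, 256);
//   GemmCoord tile(128, 128, 32);
//   GemmCoord tiled = Swizzle::get_tiled_shape(problem, tile, /*split_k_slices=*/1);
//   // tiled = (8, 4, 1): ceil(1024/128) x ceil(512/128) x 1
//   int log_tile = Swizzle::get_log_tile(tiled);   // 2, since N >= 4 and tiled.n() >= 3
//   dim3 grid = Swizzle::get_grid_shape(tiled);    // (8 * 4, (4 + 3) / 4, 1) = (32, 1, 1)
//   // On device, block (x=5, y=0, z=0) maps to tile offset
//   // (5 >> 2, (0 << 2) + (5 & 3), 0) = (1, 1, 0).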
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for GEMMs
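/// (The "horizontal" variant maps blockIdx.x to the N dimension and blockIdx.y to the
/// M dimension, rasterizing threadblocks row-by-row across the output.)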
struct GemmHorizontalThreadblockSwizzle {
CUTLASS_HOST_DEVICE
GemmHorizontalThreadblockSwizzle() { }
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
static GemmCoord get_tiled_shape(
GemmCoord problem_size,
GemmCoord tile_size,
int split_k_slices) {
return GemmCoord(
(problem_size.m() + tile_size.m() - 1) / tile_size.m(),
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
split_k_slices);
}
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
static dim3 get_grid_shape(GemmCoord tiled_shape) {
return dim3(tiled_shape.n(), tiled_shape.m(), tiled_shape.k());
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
static int get_log_tile(GemmCoord tiled_shape) {
return 0;
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(GemmCoord tiled_shape) {
return GemmCoord{
RematerializeBlockIdxY(),
RematerializeBlockIdxX(),
RematerializeBlockIdxZ()
};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for batched GEMMs
struct GemmBatchedIdentityThreadblockSwizzle {
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
static GemmCoord get_tiled_shape(
GemmCoord problem_size,
GemmCoord tile_size,
int batch_count) {
return GemmCoord(
(problem_size.m() + tile_size.m() - 1) / tile_size.m(),
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
batch_count % (1 << 16));
}
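  // Note: gridDim.z is limited to 65535, which is why the batch count is taken
  // modulo (1 << 16) above; a batched kernel is expected to loop over any remaining
  // batches in strides of gridDim.z.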
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
static dim3 get_grid_shape(GemmCoord tiled_shape) {
return dim3(tiled_shape.m(), tiled_shape.n(), tiled_shape.k());
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
static int get_log_tile(GemmCoord tiled_shape) {
return 0;
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(GemmCoord tiled_shape) {
return GemmCoord{
RematerializeBlockIdxX(),
RematerializeBlockIdxY(),
RematerializeBlockIdxZ()
};
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(int log_tile) {
int block_idx_x = RematerializeBlockIdxX();
int block_idx_y = RematerializeBlockIdxY();
int block_idx_z = RematerializeBlockIdxZ();
return GemmCoord{(block_idx_x >> log_tile), //
(block_idx_y << log_tile) + ((block_idx_x) & ((1 << (log_tile)) - 1)),
block_idx_z};
}
/// Gets the batch index
CUTLASS_DEVICE
static int get_batch_idx() {
return RematerializeBlockIdxZ();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for split-K GEMMs
template <int N = 1>
struct GemmSplitKIdentityThreadblockSwizzle {
int const kTile = N;
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
static GemmCoord get_tiled_shape(
GemmCoord problem_size,
GemmCoord tile_size,
int partitions) {
return GemmCoord(
(problem_size.m() + tile_size.m() - 1) / tile_size.m(),
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
partitions);
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
static int get_log_tile(GemmCoord tiled_shape) {
auto n = tiled_shape.n();
// Thresholds picked so that it doesn't cause too many no-op CTAs
if (N >= 8 && n >= 6)
return 3;
else if (N >= 4 && n >= 3)
return 2;
else if (N >= 2 && n >= 2)
return 1;
else
return 0;
}
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
static dim3 get_grid_shape(GemmCoord tiled_shape) {
int tile = 1 << get_log_tile(tiled_shape);
return dim3(tiled_shape.m() * tile, (tiled_shape.n() + tile - 1) / tile, tiled_shape.k());
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(int log_tile) {
int block_idx_x = RematerializeBlockIdxX();
int block_idx_y = RematerializeBlockIdxY();
int block_idx_z = RematerializeBlockIdxZ();
return GemmCoord{(block_idx_x >> log_tile), //
(block_idx_y << log_tile) + ((block_idx_x) & ((1 << (log_tile)) - 1)),
block_idx_z};
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(GemmCoord tiled_shape) {
int const kTile = N;
int block_idx_x = RematerializeBlockIdxX();
int block_idx_y = RematerializeBlockIdxY();
if ((tiled_shape.m() < kTile) || (tiled_shape.n() < kTile))
return GemmCoord{block_idx_x, block_idx_y, RematerializeBlockIdxZ()};
return GemmCoord{
(block_idx_x / kTile),
(block_idx_y * kTile) + (block_idx_x % kTile),
RematerializeBlockIdxZ()
};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for split-K GEMMs
struct GemmSplitKHorizontalThreadblockSwizzle {
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
static GemmCoord get_tiled_shape(
GemmCoord problem_size,
GemmCoord tile_size,
int partitions) {
return GemmCoord(
(problem_size.m() + tile_size.m() - 1) / tile_size.m(),
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
partitions);
}
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
static dim3 get_grid_shape(GemmCoord tiled_shape) {
return dim3(tiled_shape.n(), tiled_shape.m(), tiled_shape.k());
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
static int get_log_tile(GemmCoord tiled_shape) {
return 0;
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(int log_tile) {
return GemmCoord{
RematerializeBlockIdxY(),
RematerializeBlockIdxX(),
RematerializeBlockIdxZ()
};
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static GemmCoord get_tile_offset(GemmCoord tiled_shape) {
return GemmCoord{
RematerializeBlockIdxY(),
RematerializeBlockIdxX(),
RematerializeBlockIdxZ()
};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for batched GEMVs
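/// (blockIdx.x indexes N tiles, blockIdx.y indexes batch tiles, and blockIdx.z indexes
/// K tiles; M is always a single tile.)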
struct GemvBatchedStridedThreadblockDefaultSwizzle {
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
static BatchedGemmCoord get_tiled_shape(
BatchedGemmCoord problem_size,
BatchedGemmCoord tile_size) {
return BatchedGemmCoord(
1, // M is always 1
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
(problem_size.k() + tile_size.k() - 1) / tile_size.k(),
(problem_size.batch() + tile_size.batch() - 1) / tile_size.batch());
}
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
static dim3 get_grid_shape(BatchedGemmCoord tiled_shape) {
return dim3(tiled_shape.n(), tiled_shape.batch(), tiled_shape.k());
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
static int get_log_tile(GemmCoord tiled_shape) {
return 0;
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static BatchedGemmCoord get_tile_offset(int log_tile) {
return BatchedGemmCoord{
      0, // M tile offset is always 0 (M has a single tile)
RematerializeBlockIdxX(),
RematerializeBlockIdxZ(),
RematerializeBlockIdxY(),
};
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
static BatchedGemmCoord get_tile_offset() {
return BatchedGemmCoord{
      0, // M tile offset is always 0 (M has a single tile)
RematerializeBlockIdxX(),
RematerializeBlockIdxZ(),
RematerializeBlockIdxY(),
};
}
/// Gets the batch tile index
CUTLASS_DEVICE
static int get_batch_tile_idx() {
return RematerializeBlockIdxY();
}
/// Gets the absolute batch index
CUTLASS_DEVICE
static int get_batch_idx() {
return RematerializeBlockDimY()*RematerializeBlockIdxY() + RematerializeThreadIdxY();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/threadblock/threadblock_swizzle.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/threadblock_swizzle.h",
"repo_id": "include",
"token_count": 5536
} | 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/array_planar_complex.h"
#include "cutlass/gemm/warp/tile_iterator_planar_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Underlying real-valued warp-level matrix multiply
typename Operator_,
/// Transformation applied to A operand (typically folded into math instruction)
ComplexTransform TransformA = ComplexTransform::kNone,
/// Transformation applied to B operand (typically folded into math instruction)
ComplexTransform TransformB = ComplexTransform::kNone
>
class MmaPlanarComplex {
public:
/// Underlying real-valued warp-level matrix multiply
using Operator = Operator_;
  /// Shape of warp-level matrix multiply
using Shape = typename Operator::Shape;
/// Transformation applied to A operand (typically folded into math instruction)
static ComplexTransform const kTransformA = TransformA;
/// Transformation applied to B operand (typically folded into math instruction)
static ComplexTransform const kTransformB = TransformB;
/// Fragment of elements
using FragmentA = ArrayPlanarComplex<typename Operator::ElementA, Operator::FragmentA::kElements>;
/// Iterator into planar complex
using IteratorA = TileIteratorPlanarComplex<typename Operator::IteratorA>;
/// Layout in memory of the A operand
using LayoutA = typename Operator::LayoutA;
using FragmentB = ArrayPlanarComplex<typename Operator::ElementB, Operator::FragmentB::kElements>;
/// Iterator into planar complex
using IteratorB = TileIteratorPlanarComplex<typename Operator::IteratorB>;
/// Layout in memory of the B operand
using LayoutB = typename Operator::LayoutB;
/// Tile iterator for accumulator
using IteratorC = TileIteratorPlanarComplex<typename Operator::IteratorC>;
/// Accumulator fragment
using FragmentC = ArrayPlanarComplex<typename Operator::ElementC, Operator::FragmentC::kElements>;
/// Layout of accumulator fragment in memory
using LayoutC = typename Operator::LayoutC;
private:
/// Number of mma operations performed
using MmaIterations = MatrixShape<
Operator::Shape::kM / Operator::Policy::Operator::Shape::kM,
Operator::Shape::kN / Operator::Policy::Operator::Shape::kN
>;
public:
/// Ctor
CUTLASS_DEVICE
MmaPlanarComplex() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A_in,
FragmentB const &B_in,
FragmentC const &C) const {
D.real = C.real;
D.imag = C.imag;
//
// Transform fragments based on conjugate operations.
//
negate<typename FragmentA::ArrayReal> neg_A;
FragmentA frag_A;
frag_A.real = A_in.real;
    if (kTransformA == ComplexTransform::kConjugate) {
      frag_A.imag = neg_A(A_in.imag);
    }
    else {
      frag_A.imag = A_in.imag;
    }
    FragmentB frag_B;
    frag_B.real = B_in.real;
    if (kTransformB == ComplexTransform::kConjugate) {
      negate<typename FragmentB::ArrayReal> neg_B;
      frag_B.imag = neg_B(B_in.imag);
    }
    else {
      frag_B.imag = B_in.imag;
    }
//
// Accumulated real-valued matrix multiplies
//
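    // The four real-valued MMAs below implement the complex product
    //   (A.r + i*A.i) * (B.r + i*B.i) = (A.r*B.r - A.i*B.i) + i*(A.r*B.i + A.i*B.r)
    // accumulated into D.real and D.imag.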
Operator real_mma;
// D.i += A.i * B.r
real_mma(D.imag, frag_A.imag, frag_B.real, D.imag);
// D.r += A.r * B.r
real_mma(D.real, frag_A.real, frag_B.real, D.real);
// D.i += A.r * B.i
real_mma(D.imag, frag_A.real, frag_B.imag, D.imag);
// D.r += -A.i * B.i
frag_A.imag = neg_A(frag_A.imag);
real_mma(D.real, frag_A.imag, frag_B.imag, D.real);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_planar_complex.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_planar_complex.h",
"repo_id": "include",
"token_count": 1855
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/arch/wmma.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/wmma_array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_wmma.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
///< Structure to compute the matrix product targeting Tensor Cores via the CUDA WMMA API.
template <
///< Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
///< Data type of A elements
typename ElementA_,
///< Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
///< Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
///< Element type of C matrix
typename ElementC_,
///< Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
///< Policy describing warp-level Wmma operation (concept: MmaTensorOpPolicy)
typename Policy_,
///< Number of partitions along K dimension
int PartitionsK_ = 1,
///< Used for partial specialization
typename Enable = bool
>
class MmaTensorOpWmma {
public:
///< Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
///< Data type of multiplicand A
using ElementA = ElementA_;
///< Layout of multiplicand A
using LayoutA = LayoutA_;
///< Data type of multiplicand B
using ElementB = ElementB_;
///< Layout of multiplicand B
using LayoutB = LayoutB_;
///< Data type of accumulator matrix C
using ElementC = ElementC_;
///< Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaTensorOpPolicy)
using Policy = Policy_;
/// Underlying instruction shape
using InstructionShape = typename Policy::Operator::Shape;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Indicates math operator
using MathOperator = typename ArchMmaOperator::Operator;
/// Underlying architecture tag
using ArchTag = typename Policy::Operator::ArchTag;
/// Complex transform on A operand
static ComplexTransform const kTransformA = ComplexTransform::kNone;
/// Complex transform on B operand
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassWmmaTensorOp;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpWmmaMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>, Operand::kA, ElementA, LayoutA,
Policy::OpDelta::kRow, kThreadCount, Policy>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpWmmaMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>, Operand::kB, ElementB, LayoutB,
Policy::OpDelta::kRow, kThreadCount, Policy>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpWmmaAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC,
typename Policy::OpDelta, Policy>;
/// Storage for C tile
using FragmentC = typename IteratorC::Fragment;
private:
static_assert(
!(Shape::kM % Policy::Operator::Shape::kM) &&
!(Shape::kN % Policy::Operator::Shape::kN),
"Shape of warp-level Wmma must be divisible by operator shape (wmma native size)");
/// Number of wmma operations performed
using WmmaIterations = MatrixShape<
Shape::kM / Policy::Operator::Shape::kM,
Shape::kN / Policy::Operator::Shape::kN
>;
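  // For example, assuming a 64x64 warp tile and a native 16x16x16 wmma shape,
  // WmmaIterations is 4x4, i.e. 16 accumulator fragments indexed as
  // (m * WmmaIterations::kColumn + n) in operator() below.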
public:
/// Underlying matrix multiply operator (concept: cutlass::arch::Wmma)
typename Policy::Operator wmma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaTensorOpWmma() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < WmmaIterations::kColumn; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < WmmaIterations::kRow; ++m) {
// accumulate wmma mma
wmma(D[m * WmmaIterations::kColumn + n], A[m], B[n], C[m * WmmaIterations::kColumn + n]);
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
#endif // if defined(CUTLASS_ARCH_WMMA_ENABLED)
| include/cutlass/gemm/warp/mma_tensor_op_wmma.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_tensor_op_wmma.h",
"repo_id": "include",
"token_count": 2225
} | 35 |