/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Example of a Hopper gather+GEMM+scatter kernel fusion.
This example fuses gather before GEMM and scatter after GEMM into the same
GEMM kernel. The gather and scatter operations are controlled by an index vector
that selects rows or columns of the A, B, C, or D matrices.
Gather/scatter operations are always performed along a strided dimension
in order to preserve vectorized loads/stores. Thus the index vector is
applied to rows of row-major matrices and columns of column-major matrices.
Note that the index vector must contain integers in the range [0,X), where
X is one of (M,N,K), depending on the selected gather dimension. The problem
shape given to the GEMM kernel must consist of matrix sizes AFTER gather
and BEFORE scatter operations are applied.
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <iostream>
#include <random>
#include <numeric>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/util/command_line.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/packed_stride.hpp"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
#include "gather_gemm.hpp"
#include "gather_kernel.cuh"
#include "scatter_epilogue.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
using namespace cute;
namespace example {
// Command line options parsing
struct Options {
bool help = false;
cutlass::gemm::BatchedGemmCoord problem_size = {2048, 2048, 2048, 1};
int index_size = 1024;
int mode = 1; // N-mode gather/scatter by default
float alpha = 1.0f;
float beta = 0.0f;
bool reference_check = true;
int iterations = 20;
bool valid() const {
return problem_size.m() > 0
&& problem_size.n() > 0
&& problem_size.k() > 0
&& problem_size.batch() > 0
&& 0 <= mode && mode < 3
&& index_size <= problem_size.at(mode)
&& iterations > 0;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("batch_size", problem_size.batch());
cmd.get_cmd_line_argument("index_size", index_size);
char const modes[] = {'m', 'n', 'k'};
char mode_input = modes[mode];
cmd.get_cmd_line_argument("mode", mode_input);
mode = int(std::distance(std::begin(modes), std::find(std::begin(modes), std::end(modes), mode_input)));
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("check", reference_check, true);
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out <<
"52_hopper_gather_scatter_fusion example\n"
"\n"
" This example uses the CUTLASS Library to fuse gather/scatter of input/output tensors with GEMM.\n"
" It validates and benchmarks the fused kernel against an unfused implementation that executes\n"
" gather+GEMM+scatter in sequence and writes intermediate (gathered) tensors to memory.\n"
" For the unfused implementation two GEMM kernels are considered: default one that uses the same\n"
" schedule and instruction set as the fused one, and an optimized one that utilizes advanced\n"
" features (such as TMA units) that cannot be used by the fused kernel due to hardware constraints."
"\n"
"Options:\n"
" --help If specified, displays this usage statement.\n"
" --m=<int> GEMM M dimension\n"
" --n=<int> GEMM N dimension\n"
" --k=<int> GEMM K dimension\n"
" --batch_size=<int> GEMM batch size\n"
" --index_size=<int> Size of N dimension gather/scatter index\n"
" --mode=<m,n,k> Gather mode (M, N, or K)\n"
" --alpha=<float> GEMM alpha parameter\n"
" --beta=<float> GEMM beta parameter\n"
" --iterations=<int> Number of profiling iterations to perform.\n"
"\n"
"Examples:\n"
"\n"
"$ ./examples/52_hopper_gather_scatter_fusion/52_hopper_gather_scatter_fusion --m=1024 --n=2048 --k=1024 --mode=n --index_size=1024\n";
return out;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template<class ElementA, class LayoutA, class GatherA,
class ElementB, class LayoutB, class GatherB,
class ElementC, class LayoutC, class GatherC,
class ElementD, class LayoutD, class ScatterD,
class ElementAccumulator, class ElementComputeEpilogue>
struct ExampleRunner
{
// Useful aliases
using ProblemShape = Shape<int,int,int,int>;
using StrideA = cutlass::gemm::TagToStrideA_t<LayoutA>;
using StrideB = cutlass::gemm::TagToStrideB_t<LayoutB>;
using StrideC = cutlass::gemm::TagToStrideC_t<LayoutC>;
using StrideD = cutlass::gemm::TagToStrideC_t<LayoutD>;
// Alias for the epilogue type that supports gather/scatter
using Epilogue = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter<
cutlass::epilogue::collective::EpilogueGatherScatter<
StrideC, StrideD,
cutlass::epilogue::thread::LinearCombination<
ElementD, 1,
ElementAccumulator, ElementComputeEpilogue,
cutlass::epilogue::thread::ScaleType::Default,
cutlass::FloatRoundStyle::round_to_nearest, ElementC
>,
cutlass::gemm::EpilogueDefault,
GatherC,
ScatterD
>
>;
// Alias for the mainloop type
using Mainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 128 / cutlass::sizeof_bits<ElementA>::value,
ElementB, LayoutB, 128 / cutlass::sizeof_bits<ElementB>::value,
ElementAccumulator,
Shape<_128,_128,_64>,
Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelCpAsyncWarpSpecialized
>::CollectiveOp;
using Kernel = cutlass::gemm::kernel::GemmGather<
ProblemShape,
Mainloop,
Epilogue,
void,
GatherA,
GatherB
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<Kernel>;
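// Note: the custom GemmGather kernel threads the GatherA/GatherB functors through to the A/B
// global-memory loads, while gathering C and scattering D are handled by the
// EpilogueGatherScatter collective above. The `void` argument is the tile scheduler slot
// (default scheduler), mirroring the GemmUniversal reference kernels below.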
static constexpr bool DoGatherA = not cutlass::platform::is_same<GatherA, NoGather>::value;
static constexpr bool DoGatherB = not cutlass::platform::is_same<GatherB, NoGather>::value;
static constexpr bool DoGatherC = not cutlass::platform::is_same<GatherC, NoGather>::value;
static constexpr bool DoScatterD = not cutlass::platform::is_same<ScatterD, NoGather>::value;
static constexpr bool GatherAonM = DoGatherA && cutlass::platform::is_same<LayoutA,cutlass::layout::RowMajor>::value;
static constexpr bool GatherAonK = DoGatherA && cutlass::platform::is_same<LayoutA,cutlass::layout::ColumnMajor>::value;
static constexpr bool GatherBonN = DoGatherB && cutlass::platform::is_same<LayoutB,cutlass::layout::ColumnMajor>::value;
static constexpr bool GatherBonK = DoGatherB && cutlass::platform::is_same<LayoutB,cutlass::layout::RowMajor>::value;
static constexpr bool GatherConM = DoGatherC && cutlass::platform::is_same<LayoutC,cutlass::layout::RowMajor>::value;
static constexpr bool GatherConN = DoGatherC && cutlass::platform::is_same<LayoutC,cutlass::layout::ColumnMajor>::value;
static constexpr bool ScatterDonM = DoScatterD && cutlass::platform::is_same<LayoutD,cutlass::layout::RowMajor>::value;
static constexpr bool ScatterDonN = DoScatterD && cutlass::platform::is_same<LayoutD,cutlass::layout::ColumnMajor>::value;
static constexpr bool GatherModeM = GatherAonM || GatherConM || ScatterDonM;
static constexpr bool GatherModeN = GatherBonN || GatherConN || ScatterDonN;
static constexpr bool GatherModeK = GatherAonK || GatherBonK;
static_assert( GatherModeM && !GatherModeN && !GatherModeK ||
!GatherModeM && GatherModeN && !GatherModeK ||
!GatherModeM && !GatherModeN && GatherModeK,
"Only one gather mode (M, N or K) is supported by example runner");
// Construct a reference (non-gather) GEMM kernel type
using MainloopRef = Mainloop;
using EpilogueRef = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter<
cutlass::epilogue::collective::DefaultEpilogue<
StrideC, StrideD,
typename Epilogue::ThreadEpilogueOp,
typename Epilogue::EpilogueSchedule
>
>;
using KernelRef = cutlass::gemm::kernel::GemmUniversal<
ProblemShape,
MainloopRef,
EpilogueRef,
void
>;
using GemmRef = cutlass::gemm::device::GemmUniversalAdapter<KernelRef>;
// Construct an optimized reference GEMM kernel type (using TMA)
using EpilogueOpt = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_128,_128,_64>,
Shape<_2,_2,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator, ElementComputeEpilogue,
ElementC, LayoutC, 128 / cutlass::sizeof_bits<ElementC>::value,
ElementD, LayoutD, 128 / cutlass::sizeof_bits<ElementD>::value,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using MainloopOpt = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 128 / cutlass::sizeof_bits<ElementA>::value,
ElementB, LayoutB, 128 / cutlass::sizeof_bits<ElementB>::value,
ElementAccumulator,
Shape<_128,_128,_64>,
Shape<_2,_2,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<
static_cast<int>(sizeof(typename EpilogueOpt::SharedStorage))>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using KernelOpt = cutlass::gemm::kernel::GemmUniversal<
ProblemShape,
MainloopOpt,
EpilogueOpt,
void
>;
using GemmOpt = cutlass::gemm::device::GemmUniversalAdapter<KernelOpt>;
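// Summary of the three kernel types built above: Gemm is the fused gather+GEMM+scatter kernel
// (CpAsync warp-specialized mainloop, 1x1x1 cluster); GemmRef reuses the same mainloop with a
// plain epilogue and operates on pre-gathered tensors; GemmOpt lets the collective builders pick
// the auto (TMA-capable) schedules with a 2x2x1 cluster, i.e. the advanced features that the
// usage text notes cannot be used by the fused kernel.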
// Data members
cutlass::gemm::BatchedGemmCoord problem_size_orig;
cutlass::gemm::BatchedGemmCoord problem_size;
ProblemShape problem_shape_orig;
ProblemShape problem_shape;
cutlass::KernelHardwareInfo hw_info;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
StrideA stride_A_orig;
StrideB stride_B_orig;
StrideC stride_C_orig;
StrideD stride_D_orig;
StrideA stride_A;
StrideB stride_B;
StrideC stride_C;
StrideD stride_D;
cutlass::device_memory::allocation<ElementA> tensor_a;
cutlass::device_memory::allocation<ElementB> tensor_b;
cutlass::device_memory::allocation<ElementC> tensor_c;
cutlass::device_memory::allocation<ElementD> tensor_d;
cutlass::device_memory::allocation<int> gather_indices;
cutlass::device_memory::allocation<ElementA> tensor_a_gathered;
cutlass::device_memory::allocation<ElementB> tensor_b_gathered;
cutlass::device_memory::allocation<ElementC> tensor_c_gathered;
cutlass::device_memory::allocation<ElementD> tensor_d_gathered;
cutlass::device_memory::allocation<ElementD> tensor_d_reference;
cutlass::gemm::GemmUniversalMode gemm_mode;
Gemm gemm;
typename Gemm::Arguments arguments;
cutlass::device_memory::allocation<uint8_t> workspace;
GemmRef gemm_ref;
typename GemmRef::Arguments arguments_ref;
cutlass::device_memory::allocation<uint8_t> workspace_ref;
GemmOpt gemm_opt;
typename GemmOpt::Arguments arguments_opt;
cutlass::device_memory::allocation<uint8_t> workspace_opt;
ExampleRunner(Options const &options, cutlass::KernelHardwareInfo const &hw_info)
: problem_size_orig(options.problem_size),
problem_size(GatherModeM ? options.index_size : problem_size_orig.m(),
GatherModeN ? options.index_size : problem_size_orig.n(),
GatherModeK ? options.index_size : problem_size_orig.k(),
problem_size_orig.batch()),
problem_shape_orig(problem_size_orig.m(), problem_size_orig.n(), problem_size_orig.k(), problem_size_orig.batch()),
problem_shape(problem_size.m(), problem_size.n(), problem_size.k(), problem_size.batch()),
hw_info(hw_info),
alpha(options.alpha),
beta(options.beta),
stride_A_orig(cutlass::make_cute_packed_stride(
StrideA{}, make_shape(problem_size_orig.m(), problem_size_orig.k(), problem_size_orig.batch()))),
stride_B_orig(cutlass::make_cute_packed_stride(
StrideB{}, make_shape(problem_size_orig.n(), problem_size_orig.k(), problem_size_orig.batch()))),
stride_C_orig(cutlass::make_cute_packed_stride(
StrideC{}, make_shape(problem_size_orig.m(), problem_size_orig.n(), problem_size_orig.batch()))),
stride_D_orig(cutlass::make_cute_packed_stride(
StrideD{}, make_shape(problem_size_orig.m(), problem_size_orig.n(), problem_size_orig.batch()))),
stride_A(cutlass::make_cute_packed_stride(
StrideA{}, make_shape(problem_size.m(), problem_size.k(), problem_size.batch()))),
stride_B(cutlass::make_cute_packed_stride(
StrideB{}, make_shape(problem_size.n(), problem_size.k(), problem_size.batch()))),
stride_C(cutlass::make_cute_packed_stride(
StrideC{}, make_shape(problem_size.m(), problem_size.n(), problem_size.batch()))),
stride_D(cutlass::make_cute_packed_stride(
StrideD{}, make_shape(problem_size.m(), problem_size.n(), problem_size.batch()))),
tensor_a(problem_size_orig.m() * problem_size_orig.k() * problem_size_orig.batch()),
tensor_b(problem_size_orig.k() * problem_size_orig.n() * problem_size_orig.batch()),
tensor_c(problem_size_orig.m() * problem_size_orig.n() * problem_size_orig.batch()),
tensor_d(problem_size_orig.m() * problem_size_orig.n() * problem_size_orig.batch()),
gather_indices(options.index_size),
tensor_a_gathered(problem_size.m() * problem_size.k() * problem_size_orig.batch()),
tensor_b_gathered(problem_size.k() * problem_size.n() * problem_size_orig.batch()),
tensor_c_gathered(problem_size.m() * problem_size.n() * problem_size_orig.batch()),
tensor_d_gathered(problem_size.m() * problem_size.n() * problem_size_orig.batch()),
tensor_d_reference(problem_size_orig.m() * problem_size_orig.n() * problem_size_orig.batch()),
gemm_mode(problem_size.batch() > 1 ? cutlass::gemm::GemmUniversalMode::kBatched : cutlass::gemm::GemmUniversalMode::kGemm),
gemm(),
// When constructing arguments for gather/scatter gemm, we must pass stride arguments
// made for the original (non-gathered) problem size, because they are used to access
// tensors of the original shape. However, we still use the reduced (gathered) problem
// shape, since it corresponds to the logical indexing of the reduced-size GEMM.
arguments{
gemm_mode,
problem_shape,
{
tensor_a.get(),
stride_A_orig,
tensor_b.get(),
stride_B_orig
},
{
{ alpha, beta },
tensor_c.get(), stride_C_orig,
tensor_d.get(), stride_D_orig,
typename Epilogue::GatherC {gather_indices.get()},
typename Epilogue::ScatterD{gather_indices.get()}
},
hw_info,
{},
typename Kernel::GatherA{gather_indices.get()},
typename Kernel::GatherB{gather_indices.get()}
},
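// For example, with the default options (M=N=K=2048, mode=n, index_size=1024) the fused kernel
// sees problem_shape (2048,1024,2048,1), but stride_B_orig/stride_C_orig/stride_D_orig still
// describe the full 2048-wide tensors; the index vector maps each of the 1024 logical columns
// back to a physical column of the original tensors.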
workspace(Gemm::get_workspace_size(arguments)),
gemm_ref(),
arguments_ref{
gemm_mode,
problem_shape,
{
DoGatherA ? tensor_a_gathered.get() : tensor_a.get(),
stride_A,
DoGatherB ? tensor_b_gathered.get() : tensor_b.get(),
stride_B
},
{
{ alpha, beta },
DoGatherC ? tensor_c_gathered.get() : tensor_c.get(),
stride_C,
DoScatterD ? tensor_d_gathered.get() : tensor_d_reference.get(),
stride_D
},
hw_info
},
workspace_ref(GemmRef::get_workspace_size(arguments_ref)),
gemm_opt(),
arguments_opt{
gemm_mode,
problem_shape,
{
DoGatherA ? tensor_a_gathered.get() : tensor_a.get(),
stride_A,
DoGatherB ? tensor_b_gathered.get() : tensor_b.get(),
stride_B
},
{
{ alpha, beta },
DoGatherC ? tensor_c_gathered.get() : tensor_c.get(),
stride_C,
DoScatterD ? tensor_d_gathered.get() : tensor_d_reference.get(),
stride_D
},
hw_info
},
workspace_opt(GemmOpt::get_workspace_size(arguments_opt))
{
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::device::BlockFillRandomUniform(tensor_a.get(), tensor_a.size(), 1, ElementA(7), ElementA(-8), 0);
cutlass::reference::device::BlockFillRandomUniform(tensor_b.get(), tensor_b.size(), 1, ElementB(7), ElementB(-8), 0);
cutlass::reference::device::BlockFillRandomUniform(tensor_c.get(), tensor_c.size(), 1, ElementC(7), ElementC(-8), 0);
cutlass::reference::device::BlockFillSequential(tensor_d.get(), tensor_d.size(), ElementD(0), ElementD(0));
// Fill gather_indices with unique random integers in the range [0,n)
int index_range = GatherModeM ? problem_size_orig.m() : (GatherModeN ? problem_size_orig.n() : problem_size_orig.k());
std::vector<int> indices(index_range);
std::iota(indices.begin(), indices.end(), 0);
{ // std::random_shuffle was deprecated in C++14 and removed in C++17
std::random_device make_seed;
std::mt19937 source_of_randomness(make_seed());
std::shuffle(indices.begin(), indices.end(), source_of_randomness);
}
gather_indices.copy_from_host(indices.data());
auto const gemm_init = [](auto & gemm, auto const & arguments, auto & workspace)
{
cutlass::Status status = gemm.can_implement(arguments);
CUTLASS_CHECK(status);
status = gemm.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
};
gemm_init(gemm, arguments, workspace );
gemm_init(gemm_ref, arguments_ref, workspace_ref);
gemm_init(gemm_opt, arguments_opt, workspace_opt);
}
void debug_output(std::ostream & os)
{
auto print_tensor = [](std::ostream &os, char const * name, auto const & data, auto shape, auto stride)
{
std::vector<remove_cvref_t<decltype(*data.get())>> h_data(data.size());
data.copy_to_host(h_data.data());
Tensor t = make_tensor(h_data.data(), shape, stride);
os << "\n" << name << ": " << std::setw(4) << t << std::endl;
};
{
auto [M,N,K,L] = problem_shape_orig;
print_tensor(os, "A", tensor_a, make_shape(M,K,L), stride_A_orig);
print_tensor(os, "B", tensor_b, make_shape(N,K,L), stride_B_orig);
print_tensor(os, "C", tensor_c, make_shape(M,N,L), stride_C_orig);
print_tensor(os, "D", tensor_d, make_shape(M,N,L), stride_D_orig);
print_tensor(os, "D reference", tensor_d_reference, make_shape(M,N,L), stride_D_orig);
print_tensor(os, "indices", gather_indices, make_shape(gather_indices.size()), make_stride(_1{}));
}
}
template<class Gemm2>
static void run_gemm(Gemm2 &gemm)
{
cutlass::Status status = gemm.run();
CUTLASS_CHECK(status);
}
template<class Gemm2>
void run_reference(Gemm2 &gemm)
{
// Convenience wrapper around calls to separate gather/scatter kernels
auto run_gather = [this](auto call, auto const & input, auto & output, auto gather_func, auto batch_size, auto stride)
{
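// Locate the first mode whose stride is not statically 1: for these packed row/column-major
// tensors that is the leading-dimension mode, so get<I>(stride) is the distance between
// consecutive rows/columns handed to the standalone gather/scatter kernel.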
[[maybe_unused]] auto idx = find_if(stride, [](auto x){ return not is_constant<1, decltype(x)>{}; });
constexpr int I = decltype(idx)::value;
call(input.get(),
output.get(),
gather_func,
batch_size,
static_cast<int>(input.size() / batch_size),
static_cast<int>(output.size() / batch_size),
static_cast<int>(get<I>(stride)),
hw_info);
};
// Forward calls via lambda to avoid specifying template arguments
auto gather_call = [](auto&&... args){ gather(static_cast<decltype(args)&&>(args)...); };
// MSVC doesn't count a use inside a discarded "if constexpr" branch.
[[maybe_unused]] auto scatter_call = [](auto&&... args){ scatter(static_cast<decltype(args)&&>(args)...); };
if constexpr (DoGatherA) {
run_gather(gather_call, tensor_a, tensor_a_gathered, arguments.gather_A, problem_size.batch(), stride_A);
}
if constexpr (DoGatherB) {
run_gather(gather_call, tensor_b, tensor_b_gathered, arguments.gather_B, problem_size.batch(), stride_B);
}
if constexpr (DoGatherC) {
if (beta != ElementComputeEpilogue(0)) {
run_gather(gather_call, tensor_c, tensor_c_gathered, arguments.epilogue.gather_C, problem_size.batch(), stride_C);
}
}
run_gemm(gemm);
if constexpr (DoScatterD) {
run_gather(scatter_call, tensor_d_gathered, tensor_d_reference, arguments.epilogue.scatter_D, problem_size.batch(), stride_D);
}
}
bool verify()
{
run_gemm(gemm);
run_reference(gemm_ref);
cudaDeviceSynchronize();
return cutlass::reference::device::BlockCompareEqual(tensor_d.get(), tensor_d_reference.get(), tensor_d.size());
}
bool run(Options const &options)
{
if (options.reference_check) {
if (!verify()) {
std::cout << "Failed validation" << std::endl;
#if 0
debug_output(std::cout);
#endif
return false;
}
else {
std::cout << "Passed validation" << std::endl;
}
}
//
// Run profiling loop
//
auto const benchmark = [&](auto name, auto func)
{
GpuTimer timer;
timer.start();
for (int iter = 0; iter < options.iterations; ++iter) {
func();
}
timer.stop();
double runtime = timer.elapsed_millis() / double(options.iterations);
double gflops = 2 * double(problem_size.product()) / 1e6 / runtime; // Two flops per multiply-add; runtime is in ms, so /1e6 yields GFLOP/s
std::cout << name << ":\n";
std::cout << " Runtime: " << runtime << " ms\n";
std::cout << " GFLOPs: " << gflops << "\n";
};
benchmark("Fused", [&](){ run_gemm(gemm); });
benchmark("Unfused default", [&](){ run_reference(gemm_ref); });
benchmark("Unfused optimized", [&](){ run_reference(gemm_opt); });
return true;
}
};
} // namespace example
int main(int argc, const char ** argv) {
bool notSupported = false;
// CUDA 12 minimum required
if (__CUDACC_VER_MAJOR__ < 12) {
std::cerr << "This example requires CUDA Toolkit version 12 or later.\n";
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (props.major < 9) {
std::cerr << "This example requires a device with compute capability 90 or higher.\n";
notSupported = true;
}
if (notSupported) {
return EXIT_SUCCESS; // Do not fail CI checks on unsupported systems
}
example::Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << "\n";
return EXIT_SUCCESS;
}
if (!options.valid()) {
std::cerr << "Invalid arguments." << "\n";
return EXIT_FAILURE;
}
cutlass::KernelHardwareInfo hw_info;
hw_info.device_id = 0;
hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
bool result = true;
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
switch (options.mode) {
using namespace example;
case 0: {
std::cout << "Gather A,C + scatter D on M mode:" << std::endl;
using Runner = ExampleRunner<
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // A
cutlass::half_t, cutlass::layout::ColumnMajor, NoGather, // B
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // C
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // D
float, float>;
result &= Runner(options, hw_info).run(options);
break;
}
case 1: {
std::cout << "Gather B,C + scatter D on N mode:" << std::endl;
using Runner = ExampleRunner<
cutlass::half_t, cutlass::layout::RowMajor, NoGather, // A
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // B
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // C
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // D
float, float>;
result &= Runner(options, hw_info).run(options);
break;
}
case 2: {
std::cout << "Gather A,B on K mode:" << std::endl;
using Runner = ExampleRunner<
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // A
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // B
cutlass::half_t, cutlass::layout::RowMajor, NoGather, // C
cutlass::half_t, cutlass::layout::RowMajor, NoGather, // D
float, float>;
result &= Runner(options, hw_info).run(options);
break;
}
}
#endif
return result ? EXIT_SUCCESS : EXIT_FAILURE;
}
// End of file: examples/52_hopper_gather_scatter_fusion/52_hopper_gather_scatter_fusion.cu
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Ptr-Array Batched GEMM example using CUTLASS 3 APIs for the NVIDIA Hopper architecture.
This example demonstrates an implementation of Ptr-Array Batched GEMM using a TMA + GMMA
warp-specialized cooperative kernel.
The new feature showcased in this example is on-the-fly modification of TMA descriptors
to move between batches (represented by l).
To run this example:
$ ./examples/56_hopper_ptr_array_batched_gemm/56_hopper_ptr_array_batched_gemm --m=2048 --n=2048 --k=2048 --l=10
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cutlass/tensor_ref.h"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/group_array_problem_shape.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/packed_stride.hpp"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "helper.h"
using namespace cute;
#if defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM kernel configurations
/////////////////////////////////////////////////////////////////////////////////////////////////
// A matrix configuration
using ElementA = cutlass::half_t; // Element type for A matrix operand
using LayoutA = cutlass::layout::RowMajor; // Layout type for A matrix operand
constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value; // Memory access granularity/alignment of A matrix in units of elements (up to 16 bytes)
// B matrix configuration
using ElementB = cutlass::half_t; // Element type for B matrix operand
using LayoutB = cutlass::layout::ColumnMajor; // Layout type for B matrix operand
constexpr int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value; // Memory access granularity/alignment of B matrix in units of elements (up to 16 bytes)
// C/D matrix configuration
using ElementC = cutlass::half_t; // Element type for C and D matrix operands
using LayoutC = cutlass::layout::ColumnMajor; // Layout type for C and D matrix operands
constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value; // Memory access granularity/alignment of C matrix in units of elements (up to 16 bytes)
// Core kernel configurations
using ElementAccumulator = float; // Element type for internal accumulation
using ArchTag = cutlass::arch::Sm90; // Tag indicating the minimum SM that supports the intended feature
using OperatorClass = cutlass::arch::OpClassTensorOp; // Operator class tag
using TileShape = Shape<_256,_128,_64>; // Threadblock-level tile size
using ClusterShape = Shape<_1,_2,_1>; // Shape of the threadblocks in a cluster
using StageCountType = cutlass::gemm::collective::StageCountAuto; // Stage count maximized based on the tile size
using KernelSchedule = cutlass::gemm::KernelPtrArrayTmaWarpSpecializedCooperative; // Kernel to launch
using EpilogueSchedule = cutlass::epilogue::PtrArrayTmaWarpSpecializedCooperative; // Epilogue to launch
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape, ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator, ElementAccumulator,
ElementC, LayoutC, AlignmentC,
ElementC, LayoutC, AlignmentC,
EpilogueSchedule
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
ArchTag, OperatorClass,
ElementA, LayoutA, AlignmentA,
ElementB, LayoutB, AlignmentB,
ElementAccumulator,
TileShape, ClusterShape,
cutlass::gemm::collective::StageCountAutoCarveout<
static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
KernelSchedule
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
cutlass::gemm::ArrayProblemShape<Shape<int,int,int,int>>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
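// Note: with GemmUniversalMode::kArray the kernel consumes device arrays of per-batch pointers
// for A, B, C and D (built below as ptr_A/ptr_B/ptr_C/ptr_D); all batches share the same
// (m,n,k) extents and strides, and the kernel updates its TMA descriptors on the fly when
// moving between batches, as noted in the file comment above.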
// Reference device GEMM implementation type
using DeviceGemmReference = cutlass::reference::device::Gemm<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
ElementAccumulator>;
using StrideA = typename Gemm::GemmKernel::StrideA;
using StrideB = typename Gemm::GemmKernel::StrideB;
using StrideC = typename Gemm::GemmKernel::StrideC;
using StrideD = typename Gemm::GemmKernel::StrideD;
StrideA stride_A;
StrideB stride_B;
StrideC stride_C;
StrideD stride_D;
uint64_t seed;
std::vector<int64_t> offset_A;
std::vector<int64_t> offset_B;
std::vector<int64_t> offset_C;
std::vector<int64_t> offset_D;
cutlass::DeviceAllocation<typename Gemm::ElementA> block_A;
cutlass::DeviceAllocation<typename Gemm::ElementB> block_B;
cutlass::DeviceAllocation<typename Gemm::ElementC> block_C;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput> block_D;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput> block_ref_D;
cutlass::DeviceAllocation<const typename Gemm::ElementA *> ptr_A;
cutlass::DeviceAllocation<const typename Gemm::ElementB *> ptr_B;
cutlass::DeviceAllocation<const typename Gemm::ElementC *> ptr_C;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput *> ptr_D;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput *> ptr_ref_D;
#endif // defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Testbed utility types
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help = false;
float alpha = 1.0f;
float beta = 0.0f;
int iterations = 10;
int m = 1024, n = 512, k = 1024, l = 10;
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
return;
}
cmd.get_cmd_line_argument("m", m);
cmd.get_cmd_line_argument("n", n);
cmd.get_cmd_line_argument("k", k);
cmd.get_cmd_line_argument("l", l);
cmd.get_cmd_line_argument("alpha", alpha, 1.f);
cmd.get_cmd_line_argument("beta", beta, 0.f);
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "56_hopper_ptr_array_batched_gemm\n\n"
<< " Hopper FP32 GEMM using a Warp Specialized kernel.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement\n\n"
<< " --m=<int> Sets the M extent of the GEMM\n"
<< " --n=<int> Sets the N extent of the GEMM\n"
<< " --k=<int> Sets the K extent of the GEMM\n"
<< " --l=<int> Sets the batch count for Ptr-Array GEMM\n"
<< " --alpha=<f32> Epilogue scalar alpha\n"
<< " --beta=<f32> Epilogue scalar beta\n\n"
<< " --iterations=<int> Number of profiling iterations to perform\n\n";
out
<< "\n\nExamples:\n\n"
<< "$ " << "56_hopper_ptr_array_batched_gemm" << " --m=1024 --n=512 --k=1024 --l=10 --alpha=2 --beta=0.707 \n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const
{
// Two flops per multiply-add
uint64_t flop = uint64_t(2) * m * n * k * l;
double gflop = double(flop) / double(1.0e9);
return gflop / runtime_s;
}
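// For example, with m=n=k=2048 and l=10 (the sizes suggested in the file comment above),
// flop = 2 * 2048^3 * 10 ~= 1.72e11, i.e. roughly 171.8 GFLOP in total.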
};
/// Result structure
struct Result
{
double avg_runtime_ms = 0.0;
double gflops = 0.0;
cutlass::Status status = cutlass::Status::kSuccess;
cudaError_t error = cudaSuccess;
bool passed = false;
};
#if defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM setup and evaluation
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to initialize a block of device data
template <class Element>
bool initialize_block(
cutlass::DeviceAllocation<Element>& block,
uint64_t seed=2023) {
Element scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::device::BlockFillRandomUniform(
block.get(), block.size(), seed, scope_max, scope_min, 0);
return true;
}
/// Allocates device-side data
void allocate(const Options &options) {
int64_t total_elements_A = 0;
int64_t total_elements_B = 0;
int64_t total_elements_C = 0;
int64_t total_elements_D = 0;
for (int32_t i = 0; i < options.l; ++i) {
offset_A.push_back(total_elements_A);
offset_B.push_back(total_elements_B);
offset_C.push_back(total_elements_C);
offset_D.push_back(total_elements_D);
int64_t elements_A = options.m * options.k;
int64_t elements_B = options.k * options.n;
int64_t elements_C = options.m * options.n;
int64_t elements_D = options.m * options.n;
total_elements_A += elements_A;
total_elements_B += elements_B;
total_elements_C += elements_C;
total_elements_D += elements_D;
}
block_A.reset(total_elements_A);
block_B.reset(total_elements_B);
block_C.reset(total_elements_C);
block_D.reset(total_elements_D);
block_ref_D.reset(total_elements_D);
}
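// All batches are packed back-to-back in a single device allocation per operand; offset_X[i]
// records where batch i starts, and the per-batch pointer arrays built in initialize() below
// point into these blocks.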
/// Initialize operands to be used in the GEMM and reference GEMM
void initialize(const Options &options) {
stride_A = cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(options.m, options.k, options.l));
stride_B = cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(options.n, options.k, options.l));
stride_C = cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(options.m, options.n, options.l));
stride_D = cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(options.m, options.n, options.l));
//
// Assign pointers
//
std::vector<ElementA *> ptr_A_host(options.l);
std::vector<ElementB *> ptr_B_host(options.l);
std::vector<ElementC *> ptr_C_host(options.l);
std::vector<ElementC *> ptr_D_host(options.l);
for (int32_t i = 0; i < options.l; ++i) {
ptr_A_host.at(i) = block_A.get() + offset_A.at(i);
ptr_B_host.at(i) = block_B.get() + offset_B.at(i);
ptr_C_host.at(i) = block_C.get() + offset_C.at(i);
ptr_D_host.at(i) = block_D.get() + offset_D.at(i);
}
ptr_A.reset(options.l);
ptr_A.copy_from_host(ptr_A_host.data());
ptr_B.reset(options.l);
ptr_B.copy_from_host(ptr_B_host.data());
ptr_C.reset(options.l);
ptr_C.copy_from_host(ptr_C_host.data());
ptr_D.reset(options.l);
ptr_D.copy_from_host(ptr_D_host.data());
initialize_block(block_A, seed + 2023);
initialize_block(block_B, seed + 2022);
initialize_block(block_C, seed + 2021);
}
/// Populates a Gemm::Arguments structure from the given commandline options
typename Gemm::Arguments args_from_options(const Options &options)
{
cutlass::KernelHardwareInfo hw_info;
// Change device_id to another value if you are running on a machine with multiple GPUs and wish
// to use a GPU other than that with device ID 0.
hw_info.device_id = 0;
hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
typename Gemm::Arguments arguments{
cutlass::gemm::GemmUniversalMode::kArray,
{{options.m, options.n, options.k, options.l}},
{ptr_A.get(), stride_A, ptr_B.get(), stride_B},
{{options.alpha, options.beta}, ptr_C.get(), stride_C, ptr_D.get(), stride_D},
hw_info
};
return arguments;
}
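// Note: the extra braces around the problem shape wrap it in the ArrayProblemShape expected by
// the kernel; ptr_A/ptr_B/ptr_C/ptr_D are device allocations holding one pointer per batch,
// prepared in initialize() above.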
bool verify(const Options &options) {
bool passed = true;
for (int32_t i = 0; i < options.l; ++i) {
cutlass::TensorRef ref_A(block_A.get() + offset_A.at(i), Gemm::LayoutA::packed({options.m, options.k}));
cutlass::TensorRef ref_B(block_B.get() + offset_B.at(i), Gemm::LayoutB::packed({options.k, options.n}));
cutlass::TensorRef ref_C(block_C.get() + offset_C.at(i), Gemm::LayoutC::packed({options.m, options.n}));
cutlass::TensorRef ref_D(block_ref_D.get() + offset_D.at(i), Gemm::LayoutD::packed({options.m, options.n}));
//
// Compute reference output
//
// Create instantiation for device reference gemm kernel
DeviceGemmReference gemm_reference;
// Launch device reference gemm kernel
gemm_reference(
{options.m, options.n, options.k},
ElementAccumulator(options.alpha),
ref_A,
ref_B,
ElementAccumulator(options.beta),
ref_C,
ref_D);
// Wait for kernel to finish
CUDA_CHECK(cudaDeviceSynchronize());
// Check if output from CUTLASS kernel and reference kernel are equal or not
passed &= cutlass::reference::device::BlockCompareEqual(block_ref_D.get() + offset_D.at(i), block_D.get() + offset_D.at(i), options.m * options.n);
}
return passed;
}
/// Execute a given example GEMM computation
template <typename Gemm>
int run(Options &options)
{
allocate(options);
initialize(options);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm;
// Create a structure of gemm kernel arguments suitable for invoking an instance of Gemm
auto arguments = args_from_options(options);
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Check if the problem size is supported or not
CUTLASS_CHECK(gemm.can_implement(arguments));
// Initialize CUTLASS kernel with arguments and workspace pointer
CUTLASS_CHECK(gemm.initialize(arguments, workspace.get()));
// Correctness / Warmup iteration
CUTLASS_CHECK(gemm.run());
// Check if output from CUTLASS kernel and reference kernel are equal or not
Result result;
result.passed = verify(options);
std::cout << " Disposition: " << (result.passed ? "Passed" : "Failed") << std::endl;
if (!result.passed) {
exit(-1);
}
// Run profiling loop
if (options.iterations > 0)
{
GpuTimer timer;
timer.start();
for (int iter = 0; iter < options.iterations; ++iter) {
CUTLASS_CHECK(gemm.initialize(arguments, workspace.get()));
CUTLASS_CHECK(gemm.run());
}
timer.stop();
// Compute average setup and runtime and GFLOPs.
float elapsed_ms = timer.elapsed_millis();
result.avg_runtime_ms = double(elapsed_ms) / double(options.iterations);
result.gflops = options.gflops(result.avg_runtime_ms / 1000.0);
std::cout << " Problem Size: " << options.m << 'x' << options.n << 'x' << options.k << std::endl;
std::cout << " Batches : " << options.l << std::endl;
std::cout << " Alpha, Beta : " << options.alpha << ',' << options.beta << std::endl;
std::cout << " Avg runtime : " << result.avg_runtime_ms << " ms" << std::endl;
std::cout << " GFLOPS : " << result.gflops << std::endl;
}
return 0;
}
#endif // defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
// CUTLASS must be compiled with CUDA 12.3 Toolkit to run this example
if (__CUDACC_VER_MAJOR__ < 12 || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ < 3)) {
std::cerr << "This example requires CUDA 12.3 or newer.\n";
// Returning zero so this test passes on older toolkits; its actions are a no-op.
return 0;
}
cudaDeviceProp props;
int current_device_id;
CUDA_CHECK(cudaGetDevice(&current_device_id));
CUDA_CHECK(cudaGetDeviceProperties(&props, current_device_id));
if (props.major < 9) {
std::cerr
<< "This example requires a GPU of NVIDIA's Hopper Architecture or "
<< "later (compute capability 90 or greater).\n";
return 0;
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
//
// Evaluate CUTLASS kernels
//
#if defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
run<Gemm>(options);
#endif
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// End of file: examples/56_hopper_ptr_array_batched_gemm/56_hopper_ptr_array_batched_gemm.cu
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp>
#include <cute/atom/mma_atom.hpp>
#include <cute/algorithm/axpby.hpp>
#include <cute/algorithm/functional.hpp>
#include <cute/algorithm/gemm.hpp>
#include <cute/tensor_impl.hpp>
namespace cute
{
//
// Cooperative Shared-Memory GEMMs
//
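//
// Usage sketch (illustrative only, hence guarded out): a single-tile kernel that stages its
// operands in shared memory and calls the cooperative_gemm() overload defined later in this
// header. The tile sizes, the scalar-FMA TiledMMA, and the naive element-wise copies are
// assumptions chosen for brevity; a real kernel would use cute::copy with a TiledCopy and an
// architecture-specific MMA atom.
//
#if 0
__global__ void cooperative_gemm_example(float const* gA, float const* gB, float* gC)
{
  // Static tile sizes so that the shared-memory layouts are fully static
  constexpr int M = 64, N = 32, K = 16;
  __shared__ float smemA[M * K];
  __shared__ float smemB[N * K];
  __shared__ float smemC[M * N];
  // Shared-memory tensors: A is (M,K), B is (N,K), C is (M,N), all M/N-major
  Tensor sA = make_tensor(make_smem_ptr(smemA), make_shape(Int<M>{}, Int<K>{}), make_stride(Int<1>{}, Int<M>{}));
  Tensor sB = make_tensor(make_smem_ptr(smemB), make_shape(Int<N>{}, Int<K>{}), make_stride(Int<1>{}, Int<N>{}));
  Tensor sC = make_tensor(make_smem_ptr(smemC), make_shape(Int<M>{}, Int<N>{}), make_stride(Int<1>{}, Int<M>{}));
  // Naive cooperative fill of the tiles from global memory (global layouts assumed to match)
  for (int i = threadIdx.x; i < M * K; i += blockDim.x) { smemA[i] = gA[i]; }
  for (int i = threadIdx.x; i < N * K; i += blockDim.x) { smemB[i] = gB[i]; }
  for (int i = threadIdx.x; i < M * N; i += blockDim.x) { smemC[i] = gC[i]; }
  __syncthreads();
  // 128 threads arranged 16x8x1, each executing a scalar FMA atom
  auto tiled_mma = make_tiled_mma(UniversalFMA<float>{}, Layout<Shape<_16,_8,_1>>{});
  // Computes sC = 1.0f * (sA * sB^T) + 0.0f * sC cooperatively across the whole block,
  // where the B transpose follows from B being stored as (N,K)
  cooperative_gemm(threadIdx.x, tiled_mma, 1.0f, sA, sB, 0.0f, sC);
  __syncthreads();
  // Write the accumulated tile back out
  for (int i = threadIdx.x; i < M * N; i += blockDim.x) { gC[i] = smemC[i]; }
}
#endif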
namespace detail {
// Predicated Cooperative GEMM
template <class... Args,
class Alpha, class TA, class ALayout, class TB, class BLayout,
class Beta, class TC, class CLayout,
class ALoadTransformOp, class BLoadTransformOp,
class CLoadTransformOp, class CStoreTransformOp,
__CUTE_REQUIRES(ALayout::rank == 2 && is_smem<TA>::value &&
BLayout::rank == 2 && is_smem<TB>::value &&
CLayout::rank == 2 && is_smem<TC>::value)>
CUTE_HOST_DEVICE
void
cooperative_gemm_predication(ThrMMA<Args...> const& thr_mma,
Alpha const& alpha,
Tensor<TA, ALayout> sA,
Tensor<TB, BLayout> sB,
Beta const& beta,
Tensor<TC, CLayout> sC,
ALoadTransformOp const& sA_load_op, // transforms A values before use in GEMM
BLoadTransformOp const& sB_load_op, // transforms B values before use in GEMM
CLoadTransformOp const& sC_load_op, // transforms C values before use in GEMM
CStoreTransformOp const& sC_store_op) // transforms results before they are stored to C
{
using TypeA = typename TA::value_type;
using TypeB = typename TB::value_type;
using TypeC = typename TC::value_type;
//
// MMA Partitioning
//
// Partition the sA, sB, and sC tiles across the threads for the MMA
Tensor tCsA = thr_mma.partition_A(sA); // (MMA,MMA_M,MMA_K)
Tensor tCsB = thr_mma.partition_B(sB); // (MMA,MMA_N,MMA_K)
Tensor tCsC = thr_mma.partition_C(sC); // (MMA,MMA_M,MMA_N)
// Create register tensors for the MMA to operate on
Tensor tCrA = thr_mma.make_fragment_A(tCsA); // (MMA,MMA_M,MMA_K)
Tensor tCrB = thr_mma.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K)
Tensor tCrC = thr_mma.make_fragment_C(tCsC); // (MMA,MMA_M,MMA_N)
#if 0
if (thread0()) {
print(" sA: "); print( sA); print("\n");
print(" sB: "); print( sB); print("\n");
print(" sC: "); print( sC); print("\n");
print(thr_mma);
print("tCsA: "); print(tCsA); print("\n");
print("tCsB: "); print(tCsB); print("\n");
print("tCsC: "); print(tCsC); print("\n");
print("tCrA: "); print(tCrA); print("\n");
print("tCrB: "); print(tCrB); print("\n");
print("tCrC: "); print(tCrC); print("\n");
}
#endif
//
// PREDICATION
//
// Create coordinate tensors for the problem
Tensor cA = make_identity_tensor(shape(sA)); // (M,K) -> (m,k)
Tensor cB = make_identity_tensor(shape(sB)); // (N,K) -> (n,k)
// Repeat partitioning with thr_mma
Tensor tCcA = thr_mma.partition_A(cA); // (MMA,MMA_M,MMA_K) -> (m,k)
Tensor tCcB = thr_mma.partition_B(cB); // (MMA,MMA_N,MMA_K) -> (n,k)
// Allocate the preds for MMA- and MMA_MN-modes
Tensor tCpA = make_tensor<bool>(make_shape(size<0>(tCsA), size<1>(tCsA)));
Tensor tCpB = make_tensor<bool>(make_shape(size<0>(tCsB), size<1>(tCsB)));
// Populate the predicates on M and N
CUTE_UNROLL
for (int i = 0; i < size(tCpA); ++i) {
tCpA(i) = elem_less(get<0>(tCcA(_,_,Int<0>{})(i)), shape<0>(sA));
}
CUTE_UNROLL
for (int i = 0; i < size(tCpB); ++i) {
tCpB(i) = elem_less(get<0>(tCcB(_,_,Int<0>{})(i)), shape<0>(sB));
}
#if 0
if (thread0()) {
print(" cA: "); print( cA); print("\n");
print(" cB: "); print( cB); print("\n");
print("tCcA: "); print(tCcA); print("\n");
print("tCcB: "); print(tCcB); print("\n");
print_tensor(tCpA);
print_tensor(tCpB);
}
#endif
//
// PREFETCH k_block = 0
// Condition the k-predication on (static) k_block == K_BLOCK_MAX-1, the last k_block
// Assumes the MMA-tiling in K is trivial
//
constexpr int K_BLOCK_MAX = size<2>(tCrA);
CUTE_UNROLL
for (int m = 0; m < size<1>(tCrA); ++m) { // Copy MMA_M
CUTE_UNROLL
for (int i = 0; i < size<0>(tCrA); ++i) { // Copy MMA_I
tCrA(i,m,0) = (tCpA(i,m) && (0 < K_BLOCK_MAX-1 || elem_less(get<1>(tCcA(i,m,0)), shape<1>(sA)))) ? sA_load_op(tCsA(i,m,0)) : TypeA{};
}
}
CUTE_UNROLL
for (int n = 0; n < size<1>(tCrB); ++n) { // Copy MMA_N
CUTE_UNROLL
for (int i = 0; i < size<0>(tCrB); ++i) { // Copy MMA_I
tCrB(i,n,0) = (tCpB(i,n) && (0 < K_BLOCK_MAX-1 || elem_less(get<1>(tCcB(i,n,0)), shape<1>(sB)))) ? sB_load_op(tCsB(i,n,0)) : TypeB{};
}
}
//
// MAINLOOP
//
// Clear accumulators
clear(tCrC);
CUTE_UNROLL
for (int k_block = 0; k_block < K_BLOCK_MAX; ++k_block)
{
if (k_block < K_BLOCK_MAX-1) // static-if not the last k_block
{
int k_next = k_block + 1; // Load k_next block
// Condition the k-predication on (static) k_block == K_BLOCK_MAX-1, the last k_block
// Assumes the MMA-tiling in K is trivial
CUTE_UNROLL
for (int m = 0; m < size<1>(tCrA); ++m) { // Copy MMA_M
CUTE_UNROLL
for (int i = 0; i < size<0>(tCrA); ++i) { // Copy MMA_I
tCrA(i,m,k_next) = (tCpA(i,m) && (k_next < K_BLOCK_MAX-1 || elem_less(get<1>(tCcA(i,m,k_next)), shape<1>(sA)))) ? sA_load_op(tCsA(i,m,k_next)) : TypeA{};
}
}
CUTE_UNROLL
for (int n = 0; n < size<1>(tCrB); ++n) { // Copy MMA_N
CUTE_UNROLL
for (int i = 0; i < size<0>(tCrB); ++i) { // Copy MMA_I
tCrB(i,n,k_next) = (tCpB(i,n) && (k_next < K_BLOCK_MAX-1 || elem_less(get<1>(tCcB(i,n,k_next)), shape<1>(sB)))) ? sB_load_op(tCsB(i,n,k_next)) : TypeB{};
}
}
}
// GEMM on k_block in registers
gemm(thr_mma, tCrA(_,_,k_block), tCrB(_,_,k_block), tCrC);
}
//
// Epilogue
//
// Create coordinate tensors for the problem
Tensor cC = make_identity_tensor(shape(sC)); // (M,N) -> (m,n)
// Repeat partitioning with thr_mma
Tensor tCcC = thr_mma.partition_C(cC); // (MMA,MMA_M,MMA_N) -> (m,n)
const bool isBetaZero = (beta == Beta{});
// Custom axpby_if for now
CUTE_UNROLL
for (int i = 0; i < size(tCrC); ++i)
{
if (elem_less(tCcC(i), shape(sC)))
{
tCsC(i) = sC_store_op(isBetaZero ? alpha * static_cast<TypeC>(tCrC(i))
: alpha * static_cast<TypeC>(tCrC(i)) +
beta * static_cast<TypeC>(sC_load_op(tCsC(i))));
}
}
}
// Slow fallback path
template <class... Args,
class Alpha, class TA, class ALayout, class TB, class BLayout,
class Beta, class TC, class CLayout,
class ALoadTransformOp, class BLoadTransformOp,
class CLoadTransformOp, class CStoreTransformOp,
__CUTE_REQUIRES(ALayout::rank == 2 && is_smem<TA>::value &&
BLayout::rank == 2 && is_smem<TB>::value &&
CLayout::rank == 2 && is_smem<TC>::value)>
CUTE_HOST_DEVICE
void
cooperative_gemm_predication(uint32_t thread_idx,
TiledMMA<Args...> const& tiled_mma,
Alpha const& alpha,
Tensor<TA, ALayout> sA,
Tensor<TB, BLayout> sB,
Beta const& beta,
Tensor<TC, CLayout> sC,
ALoadTransformOp const& sA_load_op, // transforms A values before use in GEMM
BLoadTransformOp const& sB_load_op, // transforms B values before use in GEMM
CLoadTransformOp const& sC_load_op, // transforms C values before use in GEMM
CStoreTransformOp const& sC_store_op) // transforms results before they are stored to C
{
// ThrMMA
auto thr_mma = tiled_mma.get_thread_slice(thread_idx);
cooperative_gemm_predication(thr_mma, alpha, sA, sB, beta, sC, sA_load_op, sB_load_op, sC_load_op, sC_store_op);
}
// Unpredicated Cooperative GEMM
template <class SmemCopyOpA, class SmemCopyOpB, class SmemCopyOpC,
class... Args,
class Alpha, class TA, class ALayout, class TB, class BLayout,
class Beta, class TC, class CLayout,
class ALoadTransformOp, class BLoadTransformOp,
class CLoadTransformOp, class CStoreTransformOp,
__CUTE_REQUIRES(ALayout::rank == 2 && is_smem<TA>::value &&
BLayout::rank == 2 && is_smem<TB>::value &&
CLayout::rank == 2 && is_smem<TC>::value)>
CUTE_HOST_DEVICE
void
cooperative_gemm_no_predication(uint32_t thread_idx,
TiledMMA<Args...> const& tiled_mma,
Alpha const& alpha,
Tensor<TA, ALayout> sA,
Tensor<TB, BLayout> sB,
Beta const& beta,
Tensor<TC, CLayout> sC,
ALoadTransformOp const& sA_load_op, // transforms A values before use in GEMM
BLoadTransformOp const& sB_load_op, // transforms B values before use in GEMM
CLoadTransformOp const& sC_load_op, // transforms C values before use in GEMM
CStoreTransformOp const& sC_store_op) // transforms results before they are stored to C
{
using TypeA = typename TA::value_type;
using TypeB = typename TB::value_type;
using TypeC = typename TC::value_type;
// ThrMMA
auto thr_mma = tiled_mma.get_thread_slice(thread_idx);
//
// MMA Partitioning
//
Tensor tCsC = thr_mma.partition_C(sC);
// Create register tensors for the MMA to operate on
Tensor tCrA = thr_mma.partition_fragment_A(sA); // (MMA,MMA_M,MMA_K)
Tensor tCrB = thr_mma.partition_fragment_B(sB); // (MMA,MMA_N,MMA_K)
Tensor tCrC = thr_mma.make_fragment_C(tCsC); // (MMA,MMA_M,MMA_N)
using CopyOpAType = SmemCopyOpA;
using CopyOpBType = SmemCopyOpB;
auto smem_tiled_copy_A = make_tiled_copy_A(Copy_Atom<CopyOpAType, TypeA>{}, thr_mma);
auto smem_thr_copy_A = smem_tiled_copy_A.get_thread_slice(thread_idx);
Tensor tCsA = smem_thr_copy_A.partition_S(sA);
Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA);
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // CPY_M
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCrA_copy_view)); // CPY_K
auto smem_tiled_copy_B = make_tiled_copy_B(Copy_Atom<CopyOpBType, TypeB>{}, thr_mma);
auto smem_thr_copy_B = smem_tiled_copy_B.get_thread_slice(thread_idx);
Tensor tCsB = smem_thr_copy_B.partition_S(sB);
Tensor tCrB_copy_view = smem_thr_copy_B.retile_D(tCrB);
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // CPY_N
CUTE_STATIC_ASSERT_V(size<2>(tCsB) == size<2>(tCrB_copy_view)); // CPY_K
#if 0
if (thread0()) {
print(" sA: "); print(sA); print("\n");
print(" sB: "); print(sB); print("\n");
print(" sC: "); print(sC); print("\n");
print(thr_mma); print("\n");
print("tCsC: "); print(tCsC); print("\n");
print("tCrA: "); print(tCrA); print("\n");
print("tCrB: "); print(tCrB); print("\n");
print("tCrC: "); print(tCrC); print("\n");
print(smem_thr_copy_A); print("\n");
print("tCsA: "); print(tCsA); print("\n");
print("tCrA_copy_view: "); print(tCrA_copy_view); print("\n");
print(smem_thr_copy_B); print("\n");
print("tCsB: "); print(tCsB); print("\n");
print("tCrB_copy_view: "); print(tCrB_copy_view); print("\n");
}
#endif
//
// PREFETCH
//
copy(smem_tiled_copy_A, tCsA(_,_,Int<0>{}), tCrA_copy_view(_,_,Int<0>{}));
copy(smem_tiled_copy_B, tCsB(_,_,Int<0>{}), tCrB_copy_view(_,_,Int<0>{}));
//
// MAINLOOP
//
// Clear accumulators
clear(tCrC);
constexpr int K_BLOCK_MAX = size<2>(tCrA);
CUTE_UNROLL
for (int k_block = 0; k_block < K_BLOCK_MAX; ++k_block)
{
// static-if load the next k_block. No k-predication required on these loads.
if (k_block < K_BLOCK_MAX-1)
{
// Load the next k_block
int k_next = k_block + 1; // statically unrolled
copy(smem_tiled_copy_A, tCsA(_,_,k_next), tCrA_copy_view(_,_,k_next));
copy(smem_tiled_copy_B, tCsB(_,_,k_next), tCrB_copy_view(_,_,k_next));
}
// Transform A and B, relying on the compiler to remove in case of identity ops
cute::transform(tCrA(_,_,k_block), sA_load_op);
cute::transform(tCrB(_,_,k_block), sB_load_op);
// GEMM on k_block in registers
gemm(thr_mma, tCrA(_,_,k_block), tCrB(_,_,k_block), tCrC);
}
//
// Epilogue
//
auto isBetaZero = [&] () {
if constexpr (is_complex<Beta>::value) {
return beta.real() == Int<0>{} && beta.imag() == Int<0>{};
}
else {
return beta == Int<0>{};
}
CUTE_GCC_UNREACHABLE;
} ();
using CopyOpCType = SmemCopyOpC;
Tensor tCrD = thr_mma.make_fragment_C(tCsC);
if(!isBetaZero) {
copy(CopyOpCType{}, tCsC, tCrD);
// Transform C on/after load
cute::transform(tCrD, sC_load_op);
}
// C = alpha * (A * B) + beta * C
axpby(alpha, tCrC, beta, tCrD);
// Transform C before/on store
cute::transform(tCrD, sC_store_op);
copy(CopyOpCType{}, tCrD, tCsC);
}
} // end namespace detail
template <class SmemCopyOpA, class SmemCopyOpB, class SmemCopyOpC,
class... Args,
class Alpha, class TA, class ALayout, class TB, class BLayout,
class Beta, class TC, class CLayout,
class ALoadTransformOp = cute::identity, class BLoadTransformOp = cute::identity,
class CLoadTransformOp = cute::identity, class CStoreTransformOp = cute::identity,
__CUTE_REQUIRES(ALayout::rank == 2 && is_smem<TA>::value &&
BLayout::rank == 2 && is_smem<TB>::value &&
CLayout::rank == 2 && is_smem<TC>::value)>
CUTE_HOST_DEVICE
void
cooperative_gemm(uint32_t thread_idx,
TiledMMA<Args...> const& tiled_mma,
Alpha const& alpha,
Tensor<TA, ALayout> sA,
Tensor<TB, BLayout> sB,
Beta const& beta,
Tensor<TC, CLayout> sC,
ALoadTransformOp const& sA_load_op = {}, // transforms A values before use in GEMM
BLoadTransformOp const& sB_load_op = {}, // transforms B values before use in GEMM
CLoadTransformOp const& sC_load_op = {}, // transforms C values before use in GEMM
CStoreTransformOp const& sC_store_op = {}) // transforms results before they are stored to C
{
CUTE_STATIC_ASSERT_V(size<0>(sA) == size<0>(sC)); // AM == CM
CUTE_STATIC_ASSERT_V(size<0>(sB) == size<1>(sC)); // BN == CN
CUTE_STATIC_ASSERT_V(size<1>(sA) == size<1>(sB)); // AK == BK
using TypeA = typename TA::value_type;
using TypeB = typename TB::value_type;
using TypeC = typename TC::value_type;
static_assert(is_convertible_v<decay_t<invoke_result_t<ALoadTransformOp, TypeA>>, TypeA>,
"ALoadTransformOp functor must accept value of type TA::value_type and return value convertible to type TA::value_type");
static_assert(is_convertible_v<decay_t<invoke_result_t<BLoadTransformOp, TypeB>>, TypeB>,
"BLoadTransformOp functor must accept value of type TB::value_type and return value convertible to type TB::value_type");
static_assert(is_convertible_v<decay_t<invoke_result_t<CLoadTransformOp, TypeC>>, TypeC>,
"CLoadTransformOp functor must accept value of type TC::value_type and return value convertible to type TC::value_type");
static_assert(is_convertible_v<decay_t<invoke_result_t<CStoreTransformOp, TypeC>>, TypeC>,
"CStoreTransformOp functor must accept value of type TC::value_type and return value convertible to type TC::value_type");
static constexpr bool compat = weakly_compatible(tile_shape(TiledMMA<Args...>{}),
make_shape(size<0>(sA), size<0>(sB), size<1>(sA)));
if constexpr (compat) {
detail::cooperative_gemm_no_predication<SmemCopyOpA, SmemCopyOpB, SmemCopyOpC>(
thread_idx, tiled_mma, alpha, sA, sB, beta, sC,
sA_load_op, sB_load_op, sC_load_op, sC_store_op
);
} else {
detail::cooperative_gemm_predication(
thread_idx, tiled_mma, alpha, sA, sB, beta, sC,
sA_load_op, sB_load_op, sC_load_op, sC_store_op
);
}
}
template <class... Args,
class Alpha, class TA, class ALayout, class TB, class BLayout,
class Beta, class TC, class CLayout,
class ALoadTransformOp = cute::identity, class BLoadTransformOp = cute::identity,
class CLoadTransformOp = cute::identity, class CStoreTransformOp = cute::identity,
__CUTE_REQUIRES(ALayout::rank == 2 && is_smem<TA>::value &&
BLayout::rank == 2 && is_smem<TB>::value &&
CLayout::rank == 2 && is_smem<TC>::value)>
CUTE_HOST_DEVICE
void
cooperative_gemm(uint32_t thread_idx,
TiledMMA<Args...> const& tiled_mma,
Alpha const& alpha,
Tensor<TA, ALayout> sA,
Tensor<TB, BLayout> sB,
Beta const& beta,
Tensor<TC, CLayout> sC,
ALoadTransformOp const& sA_load_op = {}, // transforms A values before use in GEMM
BLoadTransformOp const& sB_load_op = {}, // transforms B values before use in GEMM
CLoadTransformOp const& sC_load_op = {}, // transforms C values before use in GEMM
CStoreTransformOp const& sC_store_op = {}) // transforms results before they are stored to C
{
using CopyOpA = AutoVectorizingCopyWithAssumedAlignment<sizeof_bits_v<typename TA::value_type>>;
using CopyOpB = AutoVectorizingCopyWithAssumedAlignment<sizeof_bits_v<typename TB::value_type>>;
using CopyOpC = AutoVectorizingCopyWithAssumedAlignment<sizeof_bits_v<typename TC::value_type>>;
cooperative_gemm<CopyOpA, CopyOpB, CopyOpC>(
thread_idx, tiled_mma, alpha, sA, sB, beta, sC,
sA_load_op, sB_load_op, sC_load_op, sC_store_op
);
}
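// Example (illustrative sketch, not part of the original header): a CTA-wide
// GEMM on shared-memory tiles using the overload above. The smem pointers,
// tile sizes, and MMA atom are hypothetical and only illustrate the calling
// convention; it assumes half_t A/B, float C, and a 128-thread CTA
// (i.e. threads == size(mma)).
//
//   using namespace cute;
//   // sA: (M,K) = (128,64), sB: (N,K) = (128,64), sC: (M,N) = (128,128), all in smem
//   Tensor sA = make_tensor(make_smem_ptr(smem_A_ptr), make_shape(_128{}, _64{}));
//   Tensor sB = make_tensor(make_smem_ptr(smem_B_ptr), make_shape(_128{}, _64{}));
//   Tensor sC = make_tensor(make_smem_ptr(smem_C_ptr), make_shape(_128{}, _128{}));
//   auto mma = make_tiled_mma(SM80_16x8x16_F32F16F16F32_TN{},
//                             Layout<Shape<_2,_2,_1>>{});   // 4 warps
//   cooperative_gemm(threadIdx.x, mma, 1.0f, sA, sB, 0.0f, sC);
//   __syncthreads();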
// Legacy overload of cute::gemm for backwards-compatibility
template <class... Args,
class Alpha, class TA, class ALayout, class TB, class BLayout,
class Beta, class TC, class CLayout,
class ALoadTransformOp = cute::identity, class BLoadTransformOp = cute::identity,
class CLoadTransformOp = cute::identity, class CStoreTransformOp = cute::identity,
__CUTE_REQUIRES(ALayout::rank == 2 && is_smem<TA>::value &&
BLayout::rank == 2 && is_smem<TB>::value &&
CLayout::rank == 2 && is_smem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(ThrMMA<Args...> const& thr_mma,
Alpha const& alpha,
Tensor<TA, ALayout> sA,
Tensor<TB, BLayout> sB,
Beta const& beta,
Tensor<TC, CLayout> sC,
ALoadTransformOp const& sA_load_op = {}, // transforms A values before use in GEMM
BLoadTransformOp const& sB_load_op = {}, // transforms B values before use in GEMM
CLoadTransformOp const& sC_load_op = {}, // transforms C values before use in GEMM
CStoreTransformOp const& sC_store_op = {}) // transforms results before they are stored to C
{
// Goes directly to the slow path to avoid getting thread_idx from thr_mma
detail::cooperative_gemm_predication(
thr_mma, alpha, sA, sB, beta, sC,
sA_load_op, sB_load_op, sC_load_op, sC_store_op
);
}
} // end namespace cute
| include/cute/algorithm/cooperative_gemm.hpp/0 | {
"file_path": "include/cute/algorithm/cooperative_gemm.hpp",
"repo_id": "include",
"token_count": 10531
} | 11 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/arch/copy.hpp>
#include <cute/arch/copy_sm90.hpp>
namespace cute
{
////////////////////////////////////////////////////////////////////////////////////////////////////
/// TMA_LOAD : Initiates a TMA copy from global memory to shared memory
////////////////////////////////////////////////////////////////////////////////////////////////////
struct SM90_TMA_LOAD_1D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint64_t cache_hint,
void * smem_ptr,
int32_t const& crd0)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.1d.shared::cluster.global.mbarrier::complete_tx::bytes.L2::cache_hint"
" [%0], [%1, {%3}], [%2], %4;"
:
: "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar),
"r"(crd0), "l"(cache_hint)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
struct PREFETCH
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
int32_t const& crd0)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
asm volatile (
"cp.async.bulk.prefetch.tensor.1d.L2.global"
" [%0, {%1}];"
:
: "l"(gmem_int_desc),
"r"(crd0)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
};
struct SM90_TMA_LOAD_2D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint64_t cache_hint,
void * smem_ptr,
int32_t const& crd0, int32_t const& crd1)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes.L2::cache_hint"
" [%0], [%1, {%3, %4}], [%2], %5;"
:
: "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar),
"r"(crd0), "r"(crd1), "l"(cache_hint)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
struct PREFETCH
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
int32_t const& crd0, int32_t const& crd1)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
asm volatile (
"cp.async.bulk.prefetch.tensor.2d.L2.global"
" [%0, {%1, %2}];"
:
: "l"(gmem_int_desc),
"r"(crd0), "r"(crd1)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
};
struct SM90_TMA_LOAD_3D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint64_t cache_hint,
void * smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.3d.shared::cluster.global.mbarrier::complete_tx::bytes.L2::cache_hint"
" [%0], [%1, {%3, %4, %5}], [%2], %6;"
:
: "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar),
"r"(crd0), "r"(crd1), "r"(crd2), "l"(cache_hint)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
struct PREFETCH
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
asm volatile (
"cp.async.bulk.prefetch.tensor.3d.L2.global"
" [%0, {%1, %2, %3}];"
:
: "l"(gmem_int_desc),
"r"(crd0), "r"(crd1), "r"(crd2)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
};
struct SM90_TMA_LOAD_4D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint64_t cache_hint,
void * smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.4d.shared::cluster.global.mbarrier::complete_tx::bytes.L2::cache_hint"
" [%0], [%1, {%3, %4, %5, %6}], [%2], %7;"
:
: "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar),
"r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3), "l"(cache_hint)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
struct PREFETCH
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
asm volatile (
"cp.async.bulk.prefetch.tensor.4d.L2.global"
" [%0, {%1, %2, %3, %4}];"
:
: "l"(gmem_int_desc),
"r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
};
struct SM90_TMA_LOAD_5D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint64_t cache_hint,
void * smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.5d.shared::cluster.global.mbarrier::complete_tx::bytes.L2::cache_hint"
" [%0], [%1, {%3, %4, %5, %6, %7}], [%2], %8;"
:
: "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar),
"r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3), "r"(crd4), "l"(cache_hint)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
struct PREFETCH
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
asm volatile (
"cp.async.bulk.prefetch.tensor.5d.L2.global"
" [%0, {%1, %2, %3, %4, %5}];"
:
: "l"(gmem_int_desc),
"r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3), "r"(crd4)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
};
struct SM90_TMA_LOAD
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint64_t cache_hint,
void * smem_ptr,
int32_t const& crd0)
{
return SM90_TMA_LOAD_1D::copy(desc_ptr, mbar_ptr, cache_hint, smem_ptr, crd0);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint64_t cache_hint,
void * smem_ptr,
int32_t const& crd0, int32_t const& crd1)
{
return SM90_TMA_LOAD_2D::copy(desc_ptr, mbar_ptr, cache_hint, smem_ptr, crd0, crd1);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint64_t cache_hint,
void * smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2)
{
return SM90_TMA_LOAD_3D::copy(desc_ptr, mbar_ptr, cache_hint, smem_ptr, crd0, crd1, crd2);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint64_t cache_hint,
void * smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3)
{
return SM90_TMA_LOAD_4D::copy(desc_ptr, mbar_ptr, cache_hint, smem_ptr, crd0, crd1, crd2, crd3);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint64_t cache_hint,
void * smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4)
{
return SM90_TMA_LOAD_5D::copy(desc_ptr, mbar_ptr, cache_hint, smem_ptr, crd0, crd1, crd2, crd3, crd4);
}
struct PREFETCH
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
int32_t const& crd0)
{
return SM90_TMA_LOAD_1D::PREFETCH::copy(desc_ptr, crd0);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
int32_t const& crd0, int32_t const& crd1)
{
return SM90_TMA_LOAD_2D::PREFETCH::copy(desc_ptr, crd0, crd1);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2)
{
return SM90_TMA_LOAD_3D::PREFETCH::copy(desc_ptr, crd0, crd1, crd2);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3)
{
return SM90_TMA_LOAD_4D::PREFETCH::copy(desc_ptr, crd0, crd1, crd2, crd3);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4)
{
return SM90_TMA_LOAD_5D::PREFETCH::copy(desc_ptr, crd0, crd1, crd2, crd3, crd4);
}
};
};
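// Example (illustrative sketch): in practice these ops are reached through
// cute::make_tma_copy and its Copy_Traits rather than called directly. The
// names below are hypothetical: `desc` is a TMA descriptor built on the host,
// `mbar` is a shared-memory mbarrier initialized with the tile's
// transaction-byte count, `smem_tile` is the shared-memory destination, and a
// single elected thread issues the copy of a 2-D tile.
//
//   if (threadIdx.x == 0) {
//     uint64_t no_hint = 0;  // no L2 cache-policy hint
//     SM90_TMA_LOAD::copy(&desc, &mbar, no_hint, smem_tile, crd0, crd1);
//   }
//   // ... arrive/wait on `mbar` before reading `smem_tile` ...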
////////////////////////////////////////////////////////////////////////////////////////////////////
/// TMA_LOAD im2col: Initiates a TMA copy, in im2col mode, from global memory to shared memory
////////////////////////////////////////////////////////////////////////////////////////////////////
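// Note: for the im2col ops below, the int32 coordinates give the base position
// of the tile in the activation tensor as (c, [w,h,d], n), while the uint16
// offsets carry the im2col (filter-window) offsets for the current (s,r,t)
// filter position, which the TMA unit adds to the spatial coordinates.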
struct SM90_TMA_LOAD_IM2COL_3D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr,
void * smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_n,
uint16_t const& offset_w)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
// Copy from global to shared::cluster.
asm volatile (
"cp.async.bulk.tensor.3d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes"
" [%0], [%1, {%3, %4, %5}], [%2], {%6};"
:
: "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar),
"r"(coord_c), "r"(coord_w), "r"(coord_n),
"h"(offset_w)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
struct PREFETCH
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_n,
uint16_t const& offset_w)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
asm volatile (
"cp.async.bulk.prefetch.tensor.3d.L2.global.im2col"
" [%0, {%1, %2, %3}], {%4};"
:
: "l"(gmem_int_desc),
"r"(coord_c), "r"(coord_w), "r"(coord_n),
"h"(offset_w)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
};
struct SM90_TMA_LOAD_IM2COL_4D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr,
void * smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_n,
uint16_t const& offset_w, uint16_t const& offset_h)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
// Copy from global to shared::cluster.
asm volatile (
"cp.async.bulk.tensor.4d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes"
" [%0], [%1, {%3, %4, %5, %6}], [%2], {%7, %8};"
:
: "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar),
"r"(coord_c), "r"(coord_w), "r"(coord_h), "r"(coord_n),
"h"(offset_w), "h"(offset_h)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
struct PREFETCH
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_n,
uint16_t const& offset_w, uint16_t const& offset_h)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
asm volatile (
"cp.async.bulk.prefetch.tensor.4d.L2.global.im2col"
" [%0, {%1, %2, %3, %4}], {%5, %6};"
:
: "l"(gmem_int_desc),
"r"(coord_c), "r"(coord_w), "r"(coord_h), "r"(coord_n),
"h"(offset_w), "h"(offset_h)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
};
struct SM90_TMA_LOAD_IM2COL_5D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr,
void * smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_d, int32_t const& coord_n,
uint16_t const& offset_w, uint16_t const& offset_h, uint16_t const& offset_d)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
// Copy from global to shared::cluster.
asm volatile (
"cp.async.bulk.tensor.5d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes"
" [%0], [%1, {%3, %4, %5, %6, %7}], [%2], {%8, %9, %10};"
:
: "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar),
"r"(coord_c), "r"(coord_w), "r"(coord_h), "r"(coord_d), "r"(coord_n),
"h"(offset_w), "h"(offset_h), "h"(offset_d)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
struct PREFETCH
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_d, int32_t const& coord_n,
uint16_t const& offset_w, uint16_t const& offset_h, uint16_t const& offset_d)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
asm volatile (
"cp.async.bulk.prefetch.tensor.5d.L2.global.im2col"
" [%0, {%1, %2, %3, %4, %5}], {%6, %7, %8};"
:
: "l"(gmem_int_desc),
"r"(coord_c), "r"(coord_w), "r"(coord_h), "r"(coord_d), "r"(coord_n),
"h"(offset_w), "h"(offset_h), "h"(offset_d)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
};
struct SM90_TMA_LOAD_IM2COL
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr,
void * smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_n,
uint16_t const& offset_w)
{
return SM90_TMA_LOAD_IM2COL_3D::copy(desc_ptr, mbar_ptr, smem_ptr,
coord_c, coord_w, coord_n,
offset_w);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr,
void * smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_n,
uint16_t const& offset_w, uint16_t const& offset_h)
{
return SM90_TMA_LOAD_IM2COL_4D::copy(desc_ptr, mbar_ptr, smem_ptr,
coord_c, coord_w, coord_h, coord_n,
offset_w, offset_h);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr,
void * smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_d, int32_t const& coord_n,
uint16_t const& offset_w, uint16_t const& offset_h, uint16_t const& offset_d)
{
return SM90_TMA_LOAD_IM2COL_5D::copy(desc_ptr, mbar_ptr, smem_ptr,
coord_c, coord_w, coord_h, coord_d, coord_n,
offset_w, offset_h, offset_d);
}
struct PREFETCH
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_n,
uint16_t const& offset_w)
{
return SM90_TMA_LOAD_IM2COL_3D::PREFETCH::copy(desc_ptr,
coord_c, coord_w, coord_n,
offset_w);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_n,
uint16_t const& offset_w, uint16_t const& offset_h)
{
return SM90_TMA_LOAD_IM2COL_4D::PREFETCH::copy(desc_ptr,
coord_c, coord_w, coord_h, coord_n,
offset_w, offset_h);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_d, int32_t const& coord_n,
uint16_t const& offset_w, uint16_t const& offset_h, uint16_t const& offset_d)
{
return SM90_TMA_LOAD_IM2COL_5D::PREFETCH::copy(desc_ptr,
coord_c, coord_w, coord_h, coord_d, coord_n,
offset_w, offset_h, offset_d);
}
};
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// TMA_LOAD_MULTICAST: Initiates a TMA copy from global memory to shared memory
////////////////////////////////////////////////////////////////////////////////////////////////////
struct SM90_TMA_LOAD_MULTICAST_1D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask,
void * smem_ptr,
int32_t const& crd0)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.1d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster"
" [%0], [%1, {%4}], [%2], %3;"
:
: "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar),
"h"(multicast_mask),
"r"(crd0)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_LOAD_MULTICAST_2D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask,
void * smem_ptr,
int32_t const& crd0, int32_t const& crd1)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster"
" [%0], [%1, {%4, %5}], [%2], %3;"
:
: "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar),
"h"(multicast_mask),
"r"(crd0), "r"(crd1)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_LOAD_MULTICAST_3D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask,
void * smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.3d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster"
" [%0], [%1, {%4, %5, %6}], [%2], %3;"
:
: "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar),
"h"(multicast_mask),
"r"(crd0), "r"(crd1), "r"(crd2)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_LOAD_MULTICAST_4D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask,
void * smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.4d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster"
" [%0], [%1, {%4, %5, %6, %7}], [%2], %3;"
:
: "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar),
"h"(multicast_mask),
"r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_LOAD_MULTICAST_5D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask,
void * smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.5d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster"
" [%0], [%1, {%4, %5, %6, %7, %8}], [%2], %3;"
:
: "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar),
"h"(multicast_mask),
"r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3), "r"(crd4)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_LOAD_MULTICAST
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask,
void * smem_ptr,
int32_t const& crd0)
{
return SM90_TMA_LOAD_MULTICAST_1D::copy(desc_ptr, mbar_ptr, multicast_mask, smem_ptr, crd0);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask,
void * smem_ptr,
int32_t const& crd0, int32_t const& crd1)
{
return SM90_TMA_LOAD_MULTICAST_2D::copy(desc_ptr, mbar_ptr, multicast_mask, smem_ptr, crd0, crd1);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask,
void * smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2)
{
return SM90_TMA_LOAD_MULTICAST_3D::copy(desc_ptr, mbar_ptr, multicast_mask, smem_ptr, crd0, crd1, crd2);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask,
void * smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3)
{
return SM90_TMA_LOAD_MULTICAST_4D::copy(desc_ptr, mbar_ptr, multicast_mask, smem_ptr, crd0, crd1, crd2, crd3);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask,
void * smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4)
{
return SM90_TMA_LOAD_MULTICAST_5D::copy(desc_ptr, mbar_ptr, multicast_mask, smem_ptr, crd0, crd1, crd2, crd3, crd4);
}
using PREFETCH = typename SM90_TMA_LOAD::PREFETCH;
};
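// Example (illustrative sketch): same calling convention as SM90_TMA_LOAD
// above, plus a 16-bit mask selecting the destination CTAs within the cluster.
// `desc`, `mbar`, `smem_tile`, and the coordinates are assumed as before.
//
//   uint16_t cta_mask = 0xF;  // multicast to CTAs 0..3 of the cluster
//   SM90_TMA_LOAD_MULTICAST::copy(&desc, &mbar, cta_mask, smem_tile, crd0, crd1);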
////////////////////////////////////////////////////////////////////////////////////////////////////
/// TMA_LOAD_MULTICAST im2col: Initiates a TMA copy, in im2col mode, from global memory to shared memory
////////////////////////////////////////////////////////////////////////////////////////////////////
struct SM90_TMA_LOAD_IM2COL_MULTICAST_3D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask,
void * smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_n,
uint16_t const& offset_w)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
// Copy from global to shared::cluster.
asm volatile (
"cp.async.bulk.tensor.3d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster"
" [%0], [%1, {%3, %4, %5}], [%2], {%6}, %7;"
:
: "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar),
"r"(coord_c), "r"(coord_w), "r"(coord_n),
"h"(offset_w),
"h"(multicast_mask)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_LOAD_IM2COL_MULTICAST_4D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask,
void * smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_n,
uint16_t const& offset_w, uint16_t const& offset_h)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
// Copy from global to shared::cluster.
asm volatile (
"cp.async.bulk.tensor.4d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster"
" [%0], [%1, {%3, %4, %5, %6}], [%2], {%7, %8}, %9;"
:
: "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar),
"r"(coord_c), "r"(coord_w), "r"(coord_h), "r"(coord_n),
"h"(offset_w), "h"(offset_h),
"h"(multicast_mask)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_LOAD_IM2COL_MULTICAST_5D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask,
void * smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_d, int32_t const& coord_n,
uint16_t const& offset_w, uint16_t const& offset_h, uint16_t const& offset_d)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
// Copy from global to shared::cluster.
asm volatile (
"cp.async.bulk.tensor.5d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster"
" [%0], [%1, {%3, %4, %5, %6, %7}], [%2], {%8, %9, %10}, %11;"
:
: "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar),
"r"(coord_c), "r"(coord_w), "r"(coord_h), "r"(coord_d), "r"(coord_n),
"h"(offset_w), "h"(offset_h), "h"(offset_d),
"h"(multicast_mask)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_LOAD_IM2COL_MULTICAST
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask,
void * smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_n,
uint16_t const& offset_w)
{
return SM90_TMA_LOAD_IM2COL_MULTICAST_3D::copy(desc_ptr, mbar_ptr, multicast_mask,
smem_ptr,
coord_c, coord_w, coord_n,
offset_w);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask,
void * smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_n,
uint16_t const& offset_w, uint16_t const& offset_h)
{
return SM90_TMA_LOAD_IM2COL_MULTICAST_4D::copy(desc_ptr, mbar_ptr, multicast_mask,
smem_ptr,
coord_c, coord_w, coord_h, coord_n,
offset_w, offset_h);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask,
void * smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_d, int32_t const& coord_n,
uint16_t const& offset_w, uint16_t const& offset_h, uint16_t const& offset_d)
{
return SM90_TMA_LOAD_IM2COL_MULTICAST_5D::copy(desc_ptr, mbar_ptr, multicast_mask,
smem_ptr,
coord_c, coord_w, coord_h, coord_d, coord_n,
offset_w, offset_h, offset_d);
}
using PREFETCH = typename SM90_TMA_LOAD_IM2COL::PREFETCH;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// TMA_STORE : Initiates a TMA copy from shared memory to global memory
////////////////////////////////////////////////////////////////////////////////////////////////////
struct SM90_TMA_STORE_1D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
void const* smem_ptr,
int32_t const& crd0)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.1d.global.shared::cta.bulk_group [%0, {%2}], [%1];"
:
: "l"(gmem_int_desc), "r"(smem_int_ptr),
"r"(crd0)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_STORE_2D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
void const* smem_ptr,
int32_t const& crd0, int32_t const& crd1)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.2d.global.shared::cta.bulk_group [%0, {%2, %3}], [%1];"
:
: "l"(gmem_int_desc), "r"(smem_int_ptr),
"r"(crd0), "r"(crd1)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_STORE_3D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
void const* smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.3d.global.shared::cta.bulk_group [%0, {%2, %3, %4}], [%1];"
:
: "l"(gmem_int_desc), "r"(smem_int_ptr),
"r"(crd0), "r"(crd1), "r"(crd2)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_STORE_4D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
void const* smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.4d.global.shared::cta.bulk_group [%0, {%2, %3, %4, %5}], [%1];"
:
: "l"(gmem_int_desc), "r"(smem_int_ptr),
"r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_STORE_5D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
void const* smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.5d.global.shared::cta.bulk_group [%0, {%2, %3, %4, %5, %6}], [%1];"
:
: "l"(gmem_int_desc), "r"(smem_int_ptr),
"r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3), "r"(crd4)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_STORE
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
void const* smem_ptr,
int32_t const& crd0)
{
return SM90_TMA_STORE_1D::copy(desc_ptr, smem_ptr, crd0);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
void const* smem_ptr,
int32_t const& crd0, int32_t const& crd1)
{
return SM90_TMA_STORE_2D::copy(desc_ptr, smem_ptr, crd0, crd1);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
void const* smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2)
{
return SM90_TMA_STORE_3D::copy(desc_ptr, smem_ptr, crd0, crd1, crd2);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
void const* smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3)
{
return SM90_TMA_STORE_4D::copy(desc_ptr, smem_ptr, crd0, crd1, crd2, crd3);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
void const* smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4)
{
return SM90_TMA_STORE_5D::copy(desc_ptr, smem_ptr, crd0, crd1, crd2, crd3, crd4);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// TMA_STORE im2col: Initiates a TMA copy, in im2col mode, from shared memory to global memory
////////////////////////////////////////////////////////////////////////////////////////////////////
struct SM90_TMA_STORE_IM2COL_3D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
void const* smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_n)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.3d.global.shared::cta.im2col_no_offs.bulk_group"
" [%0, {%2, %3, %4}], [%1];"
:
: "l"(gmem_int_desc), "r"(smem_int_ptr),
"r"(coord_c), "r"(coord_w), "r"(coord_n)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_STORE_IM2COL_4D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
void const* smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_n)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.4d.global.shared::cta.im2col_no_offs.bulk_group"
" [%0, {%2, %3, %4, %5}], [%1];"
:
: "l"(gmem_int_desc), "r"(smem_int_ptr),
"r"(coord_c), "r"(coord_w), "r"(coord_h), "r"(coord_n)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_STORE_IM2COL_5D
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
void const* smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_d, int32_t const& coord_n)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.async.bulk.tensor.5d.global.shared::cta.im2col_no_offs.bulk_group"
" [%0, {%2, %3, %4, %5, %6}], [%1];"
:
: "l"(gmem_int_desc), "r"(smem_int_ptr),
"r"(coord_c), "r"(coord_w), "r"(coord_h), "r"(coord_d), "r"(coord_n)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_STORE_IM2COL
{
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
void const* smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_n)
{
return SM90_TMA_STORE_IM2COL_3D::copy(desc_ptr, smem_ptr, coord_c, coord_w, coord_n);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
void const* smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_n)
{
return SM90_TMA_STORE_IM2COL_4D::copy(desc_ptr, smem_ptr, coord_c, coord_w, coord_h, coord_n);
}
CUTE_HOST_DEVICE static void
copy(void const* desc_ptr,
void const* smem_ptr,
int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_d, int32_t const& coord_n)
{
return SM90_TMA_STORE_IM2COL_5D::copy(desc_ptr, smem_ptr, coord_c, coord_w, coord_h, coord_d, coord_n);
}
};
// Fence to make preceding smem stores visible to a subsequent TMA_STORE (async proxy)
CUTE_HOST_DEVICE static void
tma_store_fence() {
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
asm volatile ("fence.proxy.async.shared::cta;");
#elif defined(__CUDA_ARCH__)
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
// Commit the TMA_STOREs issued by this thread as a bulk async-group
CUTE_HOST_DEVICE static void
tma_store_arrive() {
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
asm volatile("cp.async.bulk.commit_group;");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
// Wait until at most Count committed TMA_STOREs are pending and all prior commits are complete
template <int Count>
CUTE_HOST_DEVICE static void
tma_store_wait() {
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
asm volatile(
"cp.async.bulk.wait_group.read %0;"
:
: "n"(Count)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
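// Example (illustrative sketch): a typical TMA store sequence in an epilogue.
// `desc`, `smem_tile`, and the coordinates are assumed to exist as in the load
// sketch above; all names are illustrative.
//
//   tma_store_fence();                       // order smem writes w.r.t. the async proxy
//   __syncthreads();                         // all threads have written their smem values
//   if (threadIdx.x == 0) {
//     SM90_TMA_STORE::copy(&desc, smem_tile, crd0, crd1);
//     tma_store_arrive();                    // commit the issued store
//     tma_store_wait<0>();                   // block until it has finished reading smem
//   }
//   __syncthreads();                         // smem may now be reused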
////////////////////////////////////////////////////////////////////////////////////////////////////
/// TMA_REDUCE_ADD : Initiates a TMA reduce-add from shared memory to global memory
////////////////////////////////////////////////////////////////////////////////////////////////////
struct SM90_TMA_REDUCE_ADD_1D
{
CUTE_HOST_DEVICE static void
copy(void const* const desc_ptr,
void const* const smem_ptr,
int32_t const& crd0)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.reduce.async.bulk.tensor.1d.global.shared::cta.add.bulk_group [%0, {%2}], [%1];"
:
: "l"(gmem_int_desc), "r"(smem_int_ptr),
"r"(crd0)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_REDUCE_ADD_2D
{
CUTE_HOST_DEVICE static void
copy(void const* const desc_ptr,
void const* const smem_ptr,
int32_t const& crd0, int32_t const& crd1)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.reduce.async.bulk.tensor.2d.global.shared::cta.add.bulk_group [%0, {%2, %3}], [%1];"
:
: "l"(gmem_int_desc), "r"(smem_int_ptr),
"r"(crd0), "r"(crd1)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_REDUCE_ADD_3D
{
CUTE_HOST_DEVICE static void
copy(void const* const desc_ptr,
void const* const smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.reduce.async.bulk.tensor.3d.global.shared::cta.add.bulk_group [%0, {%2, %3, %4}], [%1];"
:
: "l"(gmem_int_desc), "r"(smem_int_ptr),
"r"(crd0), "r"(crd1), "r"(crd2)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_REDUCE_ADD_4D
{
CUTE_HOST_DEVICE static void
copy(void const* const desc_ptr,
void const* const smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.reduce.async.bulk.tensor.4d.global.shared::cta.add.bulk_group [%0, {%2, %3, %4, %5}], [%1];"
:
: "l"(gmem_int_desc), "r"(smem_int_ptr),
"r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_REDUCE_ADD_5D
{
CUTE_HOST_DEVICE static void
copy(void const* const desc_ptr,
void const* const smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile (
"cp.reduce.async.bulk.tensor.5d.global.shared::cta.add.bulk_group [%0, {%2, %3, %4, %5, %6}], [%1];"
:
: "l"(gmem_int_desc), "r"(smem_int_ptr),
"r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3), "r"(crd4)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_TMA_REDUCE_ADD
{
CUTE_HOST_DEVICE static void
copy(void const* const desc_ptr,
void const* const smem_ptr,
int32_t const& crd0)
{
return SM90_TMA_REDUCE_ADD_1D::copy(desc_ptr, smem_ptr, crd0);
}
CUTE_HOST_DEVICE static void
copy(void const* const desc_ptr,
void const* const smem_ptr,
int32_t const& crd0, int32_t const& crd1)
{
return SM90_TMA_REDUCE_ADD_2D::copy(desc_ptr, smem_ptr, crd0, crd1);
}
CUTE_HOST_DEVICE static void
copy(void const* const desc_ptr,
void const* const smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2)
{
return SM90_TMA_REDUCE_ADD_3D::copy(desc_ptr, smem_ptr, crd0, crd1, crd2);
}
CUTE_HOST_DEVICE static void
copy(void const* const desc_ptr,
void const* const smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3)
{
return SM90_TMA_REDUCE_ADD_4D::copy(desc_ptr, smem_ptr, crd0, crd1, crd2, crd3);
}
CUTE_HOST_DEVICE static void
copy(void const* const desc_ptr,
void const* const smem_ptr,
int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4)
{
return SM90_TMA_REDUCE_ADD_5D::copy(desc_ptr, smem_ptr, crd0, crd1, crd2, crd3, crd4);
}
};
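// Example (illustrative sketch): element-wise accumulation of a shared-memory
// tile into global memory. The call shape matches SM90_TMA_STORE, and the same
// tma_store_fence / tma_store_arrive / tma_store_wait protocol applies.
//
//   SM90_TMA_REDUCE_ADD::copy(&desc, smem_tile, crd0, crd1);  // gmem tile += smem tile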
////////////////////////////////////////////////////////////////////////////////////////////////////
/// BULK_COPY : Copy a contiguous block of memory between global and shared memory
////////////////////////////////////////////////////////////////////////////////////////////////////
struct SM90_BULK_COPY_G2S
{
CUTE_HOST_DEVICE static void
copy(void const* gmem_ptr, uint64_t* mbar_ptr,
void * smem_ptr, int32_t load_bytes)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr);
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile("cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes [%0], [%1], %2, [%3];\n"
:
: "r"(smem_int_ptr), "l"(gmem_ptr), "r"(load_bytes), "r"(smem_int_mbar)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use BULK_COPY without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
struct PREFETCH
{
CUTE_HOST_DEVICE static void
copy(void const* gmem_ptr, int32_t load_bytes)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
asm volatile("cp.async.bulk.prefetch.L2.global [%0], %1;\n"
:
: "l"(gmem_ptr), "r"(load_bytes)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use BULK_COPY without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
};
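// Example (illustrative sketch): load `bytes` contiguous bytes from global to
// shared memory. `gmem_src`, a 16B-aligned `smem_dst`, and the smem mbarrier
// `mbar` (initialized with `bytes` as its transaction count) are assumed.
//
//   if (threadIdx.x == 0) {
//     SM90_BULK_COPY_G2S::copy(gmem_src, &mbar, smem_dst, bytes);
//   }
//   // ... wait on `mbar` before reading `smem_dst` ...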
struct SM90_BULK_COPY_S2G
{
CUTE_HOST_DEVICE static void
copy(void const* smem_ptr,
void * gmem_ptr, int32_t store_bytes)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr);
asm volatile("cp.async.bulk.global.shared::cta.bulk_group [%0], [%1], %2;\n"
:
: "l"(gmem_ptr), "r"(smem_int_ptr), "r"(store_bytes)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use BULK_COPY without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
};
struct SM90_BULK_COPY_AUTO {};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // end namespace cute
| include/cute/arch/copy_sm90_tma.hpp/0 | {
"file_path": "include/cute/arch/copy_sm90_tma.hpp",
"repo_id": "include",
"token_count": 24643
} | 12 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
/*! \file
\brief im2col make_tma_copy
*/
#include "cute/arch/copy_sm90.hpp"
#include "cute/arch/copy_sm90_desc.hpp"
#include "cute/tensor.hpp"
#include "cute/algorithm/prefetch.hpp"
#include "cutlass/fast_math.h"
namespace cute
{
// Utility for unpacking TMA_LOAD_IM2COL arguments into a CopyOp
template <class CopyOp>
struct TMA_LOAD_IM2COL_Unpack
{
/// Copy from src to dst.
///
/// @param traits Copy traits created with a TMA descriptor that
/// correctly matches the input tensor and other convolution
/// parameters.
///
/// @param src Tile of the im2col-transformed coordinate tensor
/// (result of get_tma_tensor), representing the global-memory
/// tensor from which to load.
///
/// @param dst Shared memory tile, into which to load.
template <class... Args,
class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr void
copy_unpack(Copy_Traits<CopyOp, Args...> const& traits,
Tensor<TS,SLayout> const& src, // tile of the transformed global activation (A) tensor
Tensor<TD,DLayout> & dst) // shared memory tile
{
auto src_coord_offset = src(Int<0>{});
auto src_coord_cwhdn_offset_srt = flatten(src_coord_offset);
// Interpret the TMA IM2COL coordinate as (c, ([w,h,d]), n, ([s,r,t]))
CUTE_STATIC_ASSERT_V(rank(src_coord_offset) == _4{});
CUTE_STATIC_ASSERT_V(rank<1>(src_coord_offset) == rank<3>(src_coord_offset));
if constexpr (detail::is_prefetch<CopyOp>) {
return detail::explode_tuple(detail::CallCOPY<CopyOp>{},
traits.opargs_, tuple_seq<decltype(traits.opargs_)>{},
src_coord_cwhdn_offset_srt, tuple_seq<decltype(src_coord_cwhdn_offset_srt)>{});
} else {
static_assert(is_smem<TD>::value, "SM90_TMA_LOAD_IM2COL requires the destination be shared memory.");
void* dst_ptr = cute::raw_pointer_cast(dst.data());
return detail::explode_tuple(detail::CallCOPY<CopyOp>{},
traits.opargs_, tuple_seq<decltype(traits.opargs_)>{},
make_tuple(dst_ptr), seq<0>{},
src_coord_cwhdn_offset_srt, tuple_seq<decltype(src_coord_cwhdn_offset_srt)>{});
}
}
};
// Copy_Traits for SM90 im2col TMA load comes in two layers.
//
// 1. Copy_Traits<SM90_TMA_LOAD_IM2COL>
// 2. Copy_Traits<SM90_TMA_LOAD_IM2COL_OP>
//
// Copy_Traits<SM90_TMA_LOAD_IM2COL>
// is the "outer" layer. It has a TMA descriptor,
// but no barrier ("tma_mbar"), so it's "nonexecutable."
// One calls its "with" member function with a barrier,
// to get an executable "inner"-layer
// Copy_Traits<SM90_TMA_LOAD_IM2COL_OP> object.
// That object's "copy_unpack" member function
// actually invokes im2col TMA load.
struct SM90_TMA_LOAD_IM2COL_OP : SM90_TMA_LOAD_IM2COL {};
/// @brief Non-executable specialization of Copy_Traits for SM90
/// im2col TMA load, with TMA descriptor but no barrier.
///
/// Use `.with(memory_barrier)` to construct an executable version.
template <class NumBitsPerTMA, class TMATensor>
struct Copy_Traits<SM90_TMA_LOAD_IM2COL, NumBitsPerTMA, TMATensor>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1, NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1, NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
Im2ColTmaDescriptor tma_desc_;
TMATensor tma_tensor_;
CUTE_HOST_DEVICE constexpr
Im2ColTmaDescriptor const*
get_tma_descriptor() const
{
return &tma_desc_;
}
template <class GShape>
CUTE_HOST_DEVICE constexpr
TMATensor const
get_tma_tensor(GShape const&) const
{
return tma_tensor_;
}
/// @brief Get an executable specialization.
///
/// Copy_Traits specializations with SM90_TMA_LOAD_IM2COL are not
/// directly executable. Instead, call this "with" member function
/// to get an executable specialization. "Executable" means that
/// @c copy_unpack works.
///
/// @param tma_mbar Memory barrier for synchronization
///
/// @param multicast_mask Multicast mask (unused; only exists
/// for interface compatibility with the actual multicast Copy_Traits)
///
/// @return Executable specialization of @c Copy_Traits
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM90_TMA_LOAD_IM2COL_OP, NumBitsPerTMA>
with(uint64_t& tma_mbar, [[maybe_unused]] uint16_t const& multicast_mask = 0) const
{
return {{}, {&tma_desc_, &tma_mbar}};
}
// Copy_Traits specializations with SM90_TMA_LOAD_IM2COL
// are not directly executable. Instead, call .with
// to get an executable specialization.
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst) = delete;
};
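// Example (illustrative sketch): the non-executable traits above are normally
// created through the host-side im2col TMA copy atom; inside the kernel they
// are made executable with a barrier before use. `tma_load_a`, `mbar`, `tAgA`,
// and `tAsA` are hypothetical: the copy atom, an initialized smem mbarrier, a
// tile of the im2col coordinate tensor (from get_tma_tensor), and the smem
// destination.
//
//   copy(tma_load_a.with(mbar), tAgA(_,_,k), tAsA(_,_,k));  // issues the im2col TMA load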
/// @brief Executable specialization of Copy_Traits for SM90 im2col
/// TMA load, with TMA descriptor and barrier.
template <class NumBitsPerTMA>
struct Copy_Traits<SM90_TMA_LOAD_IM2COL_OP, NumBitsPerTMA>
: TMA_LOAD_IM2COL_Unpack<SM90_TMA_LOAD_IM2COL_OP>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1, NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1, NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_LOAD_IM2COL arguments
tuple<
Im2ColTmaDescriptor const*,
uint64_t* // smem mbarrier
> const opargs_;
};
template <class NumBitsPerTMA, class... Args>
struct Copy_Traits<SM90_TMA_LOAD_IM2COL::PREFETCH, NumBitsPerTMA, Args...>
: TMA_LOAD_IM2COL_Unpack<SM90_TMA_LOAD_IM2COL::PREFETCH>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1, NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1, NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_LOAD_IM2COL::PREFETCH arguments
tuple<Im2ColTmaDescriptor const*> const opargs_;
CUTE_HOST_DEVICE
Copy_Traits(Copy_Traits<SM90_TMA_LOAD_IM2COL, NumBitsPerTMA, Args...> const& traits)
: opargs_({&traits.tma_desc_}) {}
};
//////////////////////////////////////////////////////////////////////////////
////////////////////// TMA_LOAD_IM2COL_MULTICAST /////////////////////////////
//////////////////////////////////////////////////////////////////////////////
struct SM90_TMA_LOAD_IM2COL_MULTICAST_OP : SM90_TMA_LOAD_IM2COL_MULTICAST {};
/// @brief Non-executable specialization of Copy_Traits for SM90
/// im2col TMA load, with TMA descriptor but no barrier or multicast
/// mask.
///
/// Use `.with(memory_barrier)` to construct an executable version.
template <class NumBitsPerTMA, class TMATensor>
struct Copy_Traits<SM90_TMA_LOAD_IM2COL_MULTICAST, NumBitsPerTMA, TMATensor>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1, NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1, NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
Im2ColTmaDescriptor tma_desc_;
TMATensor tma_tensor_;
CUTE_HOST_DEVICE constexpr
Im2ColTmaDescriptor const*
get_tma_descriptor() const {
return &tma_desc_;
}
template <class GShape>
CUTE_HOST_DEVICE constexpr
TMATensor const
get_tma_tensor(GShape const&) const
{
return tma_tensor_;
}
/// @brief Get an executable specialization.
///
/// Copy_Traits specializations with SM90_TMA_LOAD_IM2COL_MULTICAST
/// are not directly executable. Instead, call this "with" member
/// function to get an executable specialization. "Executable"
/// means that @c copy_unpack works.
///
/// @param tma_mbar Memory barrier for synchronization
///
/// @param multicast_mask Multicast mask selecting the participating CTAs
///
/// @return Executable specialization of @c Copy_Traits
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM90_TMA_LOAD_IM2COL_MULTICAST_OP, NumBitsPerTMA>
with(uint64_t& tma_mbar, uint16_t const& multicast_mask) const {
return {{}, {&tma_desc_, &tma_mbar, multicast_mask}};
}
// Copy_Traits specializations with SM90_TMA_LOAD_IM2COL_MULTICAST
// are not directly executable. Instead, call .with to get an
// executable specialization.
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst) = delete;
};
/// @brief Executable specialization of Copy_Traits for SM90 multicast
/// im2col TMA load, with TMA descriptor, barrier, and multicast mask.
template <class NumBitsPerTMA>
struct Copy_Traits<SM90_TMA_LOAD_IM2COL_MULTICAST_OP, NumBitsPerTMA>
: TMA_LOAD_IM2COL_Unpack<SM90_TMA_LOAD_IM2COL_MULTICAST_OP>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit.
using SrcLayout = Layout<Shape<_1, NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1, NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_LOAD_IM2COL_MULTICAST arguments
tuple<
Im2ColTmaDescriptor const*,
uint64_t*, // smem mbarrier
uint16_t // multicast mask
> const opargs_;
};
//////////////////////////////////////////////////////////////////////////////
///////////////////////////// TMA_STORE IM2COL ///////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// The executable SM90_TMA_STORE_IM2COL with tma_desc
template <class NumBitsPerTMA, class TMATensor>
struct Copy_Traits<SM90_TMA_STORE_IM2COL, NumBitsPerTMA, TMATensor>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_STORE_IM2COL arguments
Im2ColTmaDescriptor tma_desc_;
TMATensor tma_tensor_;
// Return TmaDescriptor/TensorMap
CUTE_HOST_DEVICE constexpr
Im2ColTmaDescriptor const*
get_tma_descriptor() const {
return &tma_desc_;
}
template <class GShape>
CUTE_HOST_DEVICE constexpr
TMATensor const
get_tma_tensor(GShape const&) const
{
return tma_tensor_;
}
// This is the copy_unpack dispatch for this Copy_Traits
// Src needs to be a smem tensor
// Dst needs to be a gmem tensor with TmaCoordIterator .data()
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
static_assert(is_smem<TS>::value, "Expected smem src for SM90_TMA_STORE_IM2COL");
void const* const desc_ptr = &(traits.tma_desc_);
void const* const src_ptr = cute::raw_pointer_cast(src.data());
auto dst_coord = flatten(take<0,3>(dst(Int<0>{})));
return detail::explode_tuple(detail::CallCOPY<SM90_TMA_STORE_IM2COL>{},
make_tuple(desc_ptr, src_ptr), seq<0,1>{},
dst_coord, tuple_seq<decltype(dst_coord)>{});
}
};
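// Usage sketch (illustration only; the tensor names are hypothetical). Unlike the
// im2col loads above, this store traits object is directly executable: no mbarrier
// is bound, and copy_unpack expects an smem source and a gmem destination whose
// .data() carries the TMA coordinates.
#if 0
template <class TmaStoreAtom, class STensor, class GTensor>
CUTE_DEVICE void
example_im2col_tma_store(TmaStoreAtom const& tma_atom,  // Copy_Atom built from these traits
                         STensor      const& sD,        // partitioned smem source
                         GTensor           & gD)        // partitioned gmem destination
{
  copy(tma_atom, sD, gD);
}
#endif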
namespace detail {
/// @brief Creates a TMA descriptor for im2col TMA load.
///
/// @param tensor_cwhdn Global activation tensor (A matrix of Fprop).
/// This is the original (not im2col-transformed) tensor in global
/// memory.
///
/// @param slayout Rank 2 (M,K) shared memory layout of the activation
/// tensor. Here, K is "GEMM K," not the filter tensor's mode of
/// the same name.
///
/// @param traversal_stride Traversal strides convolution parameter
///
/// Each of padding_shape, traversal_stride, and dilation_shape is a
/// tuple whose size is the number of spatial modes (e.g., 3 for a 5-D
/// convolution).
///
/// @return TMA descriptor for im2col TMA load
template <class EngineA, class LayoutA,
class SmemSwizzle, class TMALayout,
class LowerCornerStride,
class UpperCornerStride,
class LowerPaddingStride,
class UpperPaddingStride,
class TraversalStride,
class LowerSRTStride,
class DilationStride>
CUTE_HOST
auto
make_im2col_tma_copy_desc(
Tensor<EngineA, LayoutA> const& tensor_cwhdn, // (C,W,H,D,N)
uint32_t range_c, // TILE_C
uint32_t range_whdn, // TILE_WHDN
SmemSwizzle const& smem_swizzle, // Swizzle
TMALayout const& tma_layout_vt, // TMA layout
LowerCornerStride const& lower_corner_whd, // WHD offset of the "base pointer"
UpperCornerStride const& upper_corner_whd, // WHD upper corner
LowerPaddingStride const& lower_padding_whd, // WHD lower padding
UpperPaddingStride const& upper_padding_whd, // WHD upper padding
TraversalStride const& stride_whd, // WHD traversal stride
LowerSRTStride const& lower_srt, // SRT offset of the "base pointer"
DilationStride const& stride_srt, // SRT stride - dilation
TMA::DescriptorAuxParams const& aux_params = {})
{
static_assert(is_gmem<EngineA>::value, "Tensor must point to GPU global memory.");
using value_type = typename EngineA::value_type;
constexpr uint32_t num_total_modes = LayoutA::rank;
constexpr int num_spatial_modes = num_total_modes - 2;
// Gmem starting address
void* gmem_address = (void*) raw_pointer_cast(tensor_cwhdn.data());
// Gmem extents are just the tensor shape
cute::array<uint64_t, 5> gmem_prob_shape = {1,1,1,1,1};
for_each(make_seq<num_total_modes>{}, [&](auto i) {
gmem_prob_shape[i] = static_cast<uint64_t>(shape<i>(tensor_cwhdn));
});
// Gmem strides are byte strides of the activation tensor in CWHDN order
cute::array<uint64_t, 5> gmem_prob_stride = {0,0,0,0,0};
for_each(make_seq<num_total_modes>{}, [&](auto i) {
gmem_prob_stride[i] = sizeof(value_type) * stride<i>(tensor_cwhdn);
});
// Traversal strides are a function of the dilation shape
// corresponding to spatial (WHD) modes.
cute::array<uint32_t, 5> tma_traversal_strides = {1,1,1,1,1};
for_each(make_seq<num_spatial_modes>{}, [&](auto i) {
tma_traversal_strides[i+1] = static_cast<uint32_t>(get<i>(stride_whd));
});
cute::array<int32_t, num_spatial_modes> tma_lower_corner{};
for_each(make_seq<num_spatial_modes>{}, [&](auto i) {
tma_lower_corner[i] = static_cast<int32_t>(get<i>(lower_corner_whd));
});
cute::array<int32_t, num_spatial_modes> tma_upper_corner{};
for_each(make_seq<num_spatial_modes>{}, [&](auto i) {
tma_upper_corner[i] = static_cast<int32_t>(get<i>(upper_corner_whd));
});
Im2ColTmaDescriptor tma_desc;
#if (__CUDACC_VER_MAJOR__ >= 12)
CUtensorMapDataType tma_format = TMA::to_CUtensorMapDataType<value_type>();
CUtensorMapInterleave tma_interleave = CU_TENSOR_MAP_INTERLEAVE_NONE;
CUtensorMapL2promotion tma_l2Promotion = to_CUtensorMapL2promotion(aux_params.l2promo_);
CUtensorMapFloatOOBfill tma_oob_fill = to_CUtensorMapFloatOOBfill(aux_params.oobfill_);
CUtensorMapSwizzle tma_swizzle = TMA::to_CUtensorMapSwizzle(detail::get_tma_swizzle_bits(smem_swizzle));
CUresult encode_result = cuTensorMapEncodeIm2col(
&tma_desc,
tma_format,
num_total_modes,
gmem_address,
gmem_prob_shape.data(),
gmem_prob_stride.data() + 1, // gmem_prob_stride[0] implicitly sizeof(value_type)
tma_lower_corner.data(),
tma_upper_corner.data(),
range_c,
range_whdn,
tma_traversal_strides.data(),
tma_interleave,
tma_swizzle,
tma_l2Promotion,
tma_oob_fill);
// The extra asserts help indicate the error's cause.
assert(encode_result != CUDA_ERROR_DEINITIALIZED);
assert(encode_result != CUDA_ERROR_NOT_INITIALIZED);
assert(encode_result != CUDA_ERROR_INVALID_CONTEXT);
assert(encode_result != CUDA_ERROR_INVALID_VALUE);
assert(encode_result == CUDA_SUCCESS);
#endif // (__CUDACC_VER_MAJOR__ >= 12)
//
// Calculate gemm shapes and linearized shapes based on tma layout tiling.
//
// Compute [w, h, d, n]
// q/p/z = (w/h/d + (upper_corner_whd - lower_corner_whd - 1)) / stride_whd + 1
auto gemm_mn_ = cute::transform(cute::make_seq<num_spatial_modes>{}, [&](auto i) {
return (shape<i+1>(tensor_cwhdn) + get<i>(upper_corner_whd) - get<i>(lower_corner_whd) - Int<1>{}) / get<i>(stride_whd) + Int<1>{};
});
auto gemm_mn = append(gemm_mn_, shape<num_spatial_modes+1>(tensor_cwhdn));
// Compute [c, s, r, t]
// fprop/dgrad, s/r/t = 1 + (upper_padding_whd - upper_corner_whd) / stride_srt
// wgrad,       s/r/t = 1 + (lower_corner_whd - lower_padding_whd) / stride_srt
auto gemm_k_ = cute::transform(cute::make_seq<num_spatial_modes>{}, [&](auto i) {
auto padding_size = conditional_return(get<i>(stride_srt) > Int<0>{},
get<i>(upper_padding_whd) - get<i>(upper_corner_whd),
get<i>(lower_corner_whd) - get<i>(lower_padding_whd));
return Int<1>{} + padding_size / get<i>(stride_srt);
});
auto gemm_k = prepend(gemm_k_, shape<0>(tensor_cwhdn));
// For the fprop/dgrad kernel, gemm_shapes is ((q, p, z, n), (c, s, r, t))
// For the wgrad kernel, gemm_shapes is ((c, s, r, t), (q, p, z, n))
auto gemm_shapes_common = make_shape(
transform_leaf(gemm_mn, [](auto s) {
return conditional_return(cute::is_static<decltype(s)>{}, s, cutlass::FastDivmod(s));
}),
gemm_k);
auto gemm_shapes = make_shape(
basis_get(stride<0,1>(tma_layout_vt), gemm_shapes_common),
basis_get(stride<0,0>(tma_layout_vt), gemm_shapes_common));
// For the fprop/dgrad kernel, the linearized shape is (whdn, (c, s, r, t))
// For the wgrad kernel, the linearized shape is ((c, s, r, t), whdn)
auto linear_shapes_common = make_shape(size(gemm_mn), gemm_k);
auto linear_shapes = make_shape(
basis_get(stride<0,1>(tma_layout_vt), linear_shapes_common),
basis_get(stride<0,0>(tma_layout_vt), linear_shapes_common));
//
// Calculate gmem basis stride based on tma layout tiling.
//
auto tma_basis_scale = make_shape(Int<1>{}, stride_whd, Int<1>{}, stride_srt);
auto tma_basis = elem_scale(tma_basis_scale, make_basis_like(tma_basis_scale));
auto gbasis_strides_common = make_stride(
append(get<1>(tma_basis), get<2>(tma_basis)),
prepend(get<3>(tma_basis), get<0>(tma_basis))); // ((w,h,d,n),(c,s,r,t))
auto gbasis_strides = make_stride(
basis_get(stride<0,1>(tma_layout_vt), gbasis_strides_common),
basis_get(stride<0,0>(tma_layout_vt), gbasis_strides_common));
//
// Create tma tensor
//
auto lower_corner = make_arithmetic_tuple(Int<0>{}, lower_corner_whd, Int<0>{}, lower_srt);
auto tensor_multimode = make_tensor(ArithmeticTupleIterator(lower_corner), gemm_shapes, gbasis_strides);
auto tensor_linear = make_identity_tensor(linear_shapes);
auto tma_tensor = make_tensor(tensor_multimode.data(), composition(
tensor_multimode.layout(),
tensor_linear(Int<0>{}),
tensor_linear.layout()));
return cute::make_tuple(tma_desc, tma_tensor);
}
template <class CopyOp,
class GEngine, class GLayout,
class SLayout,
class VShape, class VStride,
class LowerCornerStride,
class UpperCornerStride,
class LowerPaddingStride,
class UpperPaddingStride,
class TraversalStride,
class LowerSRTStride,
class DilationStride>
CUTE_HOST_RTC
auto
make_tma_atom_im2col(CopyOp,
Tensor<GEngine,GLayout> const& gtensor, // Full GMEM Tensor: ((w, h, d, n), c)
SLayout const& slayout, // CTA Tile of SMEM, potentially swizzled
int32_t const& num_multicast, // The number of CTAs involved in multicasting
Layout<VShape,VStride> const& cta_v_map, // V: CTA val idx -> gmem mode
LowerCornerStride const& lower_corner_whd,
UpperCornerStride const& upper_corner_whd,
LowerPaddingStride const& lower_padding_whd,
UpperPaddingStride const& upper_padding_whd,
TraversalStride const& stride_whd, // traversal stride
LowerSRTStride const& lower_srt,
DilationStride const& stride_srt, // dilation
TMA::DescriptorAuxParams const& aux_params = {})
{
//
// TMA parameter checking
//
CUTE_STATIC_ASSERT_V(product_each(shape(slayout)) == product_each(shape(cta_v_map)),
"TMA requires CTA_Tile and SLayout top-level shape equivalence.");
//
// TMA slayout manipulation
//
// Invert the smem to get the largest contiguous vector in the smem layout
auto inv_smem_layout = right_inverse(get_nonswizzle_portion(slayout));
// trunc_smem_idx -> trunc_smem_coord
// Map from smem idx to a gmem mode
auto sidx_to_gmode = coalesce(composition(cta_v_map, inv_smem_layout));
#if 0
print("g_layout : "); print(gtensor.layout()); print("\n");
print("s_layout : "); print(slayout); print("\n");
print("cta_t_map : "); print(cta_t_map); print("\n");
print("cta_v_map : "); print(cta_v_map); print("\n");
print("inv_smem : "); print(inv_smem_layout); print("\n");
print("sidx_to_gmode : "); print(sidx_to_gmode); print("\n");
#endif
//
// TMA gtensor manipulation
//
// Generate a TupleBasis for the gtensor
auto glayout_basis = make_identity_layout(product_each(shape(gtensor)));
// Tile the modes of gtensor with the truncated cta_v_map o inv_smem_layout_trunc
auto tma_layout_full = flatten(composition(glayout_basis, sidx_to_gmode));
// Truncate any incompatibilities -- no starting in the middle of gmodes
auto smem_rank = find_if(stride(tma_layout_full), [](auto e) {
[[maybe_unused]] auto v = basis_value(e);
return not is_constant<1,decltype(v)>{};
});
static_assert(smem_rank >= 2, "IM2COL expects at least 2 modes of the smem to vectorize with gmem.");
// IM2COL uses a maximum of 2 modes
constexpr int smem_tma_rank = cute::min(int(smem_rank), 2);
// Keep only the static-1 basis modes into gmem
auto tma_layout_trunc = take<0,smem_tma_rank>(tma_layout_full);
// Split according to the portion each multicast CTA will be responsible for
auto tma_layout_vt = logical_divide(tma_layout_trunc, shape_div(size(tma_layout_trunc), num_multicast));
#if 0
print("glayout_basis : "); print(glayout_basis); print("\n");
print("tma_layout_full : "); print(tma_layout_full); print("\n");
print("tma_layout_trunc: "); print(tma_layout_trunc); print("\n");
print("tma_layout_vt : "); print(tma_layout_vt); print("\n");
#endif
auto range_c = size<0,0>(tma_layout_vt);
auto range_whdn = size<0,1>(tma_layout_vt);
Tensor gtensor_cwhdn = make_tensor(gtensor.data(),
flatten(make_layout(basis_get(stride<0,0>(tma_layout_vt), gtensor.layout()),
basis_get(stride<0,1>(tma_layout_vt), gtensor.layout()))));
auto [tma_desc, tma_tensor] = make_im2col_tma_copy_desc(
gtensor_cwhdn,
range_c,
range_whdn,
detail::get_swizzle_portion(slayout),
tma_layout_vt,
lower_corner_whd,
upper_corner_whd,
lower_padding_whd,
upper_padding_whd,
stride_whd,
lower_srt,
stride_srt,
aux_params);
//
// Construct the Copy_Traits
//
using T = typename GEngine::value_type;
constexpr int num_bits_per_tma = decltype(size(tma_layout_trunc))::value * sizeof(T) * 8;
using Traits = Copy_Traits<CopyOp, cute::C<num_bits_per_tma>, decltype(tma_tensor)>;
using Atom = Copy_Atom<Traits, typename GEngine::value_type>;
#if 0
print("num_bits : "); print(num_bits_per_tma); print("\n");
#endif
Traits tma_traits{tma_desc, tma_tensor};
// Return the Copy_Atom
return Atom{tma_traits};
}
/// Make a TiledCopy for im2col TMA load.
///
/// @param copy_op The copy implementation: either
/// SM90_TMA_LOAD_IM2COL or SM90_TMA_LOAD_IM2COL_MULTICAST.
///
/// @param tensor_cwhdn The global tensor to use for im2col TMA loads.
/// For Fprop convolutions, this is the activation tensor. This is
/// the "original tensor that points to global memory, not the
/// coordinate (im2col-transformed) tensor.
///
/// @param slayout Layout of shared memory tile.
///
/// @param stride_whd The traversal strides convolution
/// parameter.
///
/// @return TiledCopy specialization for im2col TMA loads.
template <class CopyOp,
class GEngine, class GLayout,
class SLayout,
class TShape, class TStride,
class VShape, class VStride,
class LowerCornerStride,
class UpperCornerStride,
class LowerPaddingStride,
class UpperPaddingStride,
class TraversalStride,
class LowerSRTStride,
class DilationStride>
CUTE_HOST_RTC
auto
make_tma_copy_im2col(CopyOp const& copy_op,
Tensor<GEngine,GLayout> const& gtensor,
SLayout const& slayout,
Layout<TShape,TStride> const& cta_t_map, // CTA tid -> logical TMA tid
Layout<VShape,VStride> const& cta_v_map, // CTA vid -> gmem coord
LowerCornerStride const& lower_corner_whd,
UpperCornerStride const& upper_corner_whd,
LowerPaddingStride const& lower_padding_whd,
UpperPaddingStride const& upper_padding_whd,
TraversalStride const& stride_whd, // traversal stride
LowerSRTStride const& lower_srt,
DilationStride const& stride_srt, // dilation
TMA::DescriptorAuxParams const& aux_params = {})
{
//
// TMA parameter checking
//
CUTE_STATIC_ASSERT_V(size(slayout) % cosize(cta_t_map) == Int<0>{},
"Number of active CTAs in TMA must divide domain size of slayout.");
Copy_Atom atom = make_tma_atom_im2col(copy_op, gtensor, slayout, cosize(cta_t_map), cta_v_map,
lower_corner_whd, upper_corner_whd, lower_padding_whd,
upper_padding_whd, stride_whd, lower_srt, stride_srt, aux_params);
//
// Construct the TiledCopy
//
auto cta_tiler = product_each(shape(cta_v_map));
auto num_elems_per_tma = size<1>(typename decltype(atom)::RefLayout{}) / static_value<sizeof_bits<typename GEngine::value_type>>();
// smem idx -> smem coord
auto inv_smem_layout = right_inverse(get_nonswizzle_portion(slayout));
// CTA V -> smem_coord
auto layout_v = composition(inv_smem_layout, num_elems_per_tma);
// Scale that up to cover all of the smem_coords
auto layout_V = tile_to_shape(make_layout(layout_v), size(cta_v_map));
// CTA T -> smem idx
auto layout_t = make_layout(cosize(cta_t_map), shape_div(num_elems_per_tma, cosize(cta_t_map)));
// CTA TID -> smem coord
auto layout_T = composition(inv_smem_layout, composition(layout_t, cta_t_map));
// Combine with the T mapping
[[maybe_unused]] auto layout_TV = make_layout(layout_T, layout_V);
#if 0
print("cta_tiler : "); print(cta_tiler); print("\n");
print("layout_v : "); print(layout_v); print("\n");
print("layout_V : "); print(layout_V); print("\n");
print("layout_t : "); print(layout_t); print("\n");
print("layout_T : "); print(layout_T); print("\n");
print("layout_TV : "); print(layout_TV); print("\n");
#endif
return TiledCopy<decltype(atom), decltype(layout_TV), decltype(cta_tiler)>{atom};
}
/// Make a TiledCopy for im2col TMA with no offsets.
/// E.g. im2col TMA load for C and im2col TMA store for D.
template <class CopyOp,
class GEngine, class GLayout,
class SLayout,
class TShape, class TStride,
class VShape, class VStride>
CUTE_HOST_RTC
auto
make_tma_copy_im2col(CopyOp const& copy_op,
Tensor<GEngine,GLayout> const& gtensor,
SLayout const& slayout,
Layout<TShape,TStride> const& cta_t_map, // CTA tid -> logical TMA tid
Layout<VShape,VStride> const& cta_v_map) // CTA vid -> gmem coord
{
constexpr int num_spatial_modes = rank<0>(GLayout{}) - 1;
return make_tma_copy_im2col(copy_op, gtensor, slayout, cta_t_map, cta_v_map,
append<num_spatial_modes>(Stride<_0>{}, Int<0>{}), // lower_corner_whd
append<num_spatial_modes>(Stride<_0>{}, Int<0>{}), // upper_corner_whd
append<num_spatial_modes>(Stride<_0>{}, Int<0>{}), // lower_padding_whd
append<num_spatial_modes>(Stride<_0>{}, Int<0>{}), // upper_padding_whd
append<num_spatial_modes>(Stride<_1>{}, Int<1>{}), // stride_whd
append<num_spatial_modes>(Stride<_0>{}, Int<0>{}), // lower_srt
append<num_spatial_modes>(Stride<_1>{}, Int<1>{})); // stride_srt
}
} // namespace detail
template <class CopyOp,
class Engine0, class Layout0,
class SLayout,
class CTATiler,
class MulticastSize,
class LowerCornerStride,
class UpperCornerStride,
class LowerPaddingStride,
class UpperPaddingStride,
class TraversalStride,
class LowerSRTStride,
class DilationStride>
CUTE_HOST_RTC
auto
make_im2col_tma_copy(CopyOp const& copy_op,
Tensor<Engine0, Layout0> const& tensor_cwhdn,
SLayout const& slayout,
CTATiler const& cta_tiler,
MulticastSize const& multicast_size,
LowerCornerStride const& lower_corner_whd,
UpperCornerStride const& upper_corner_whd,
LowerPaddingStride const& lower_padding_whd,
UpperPaddingStride const& upper_padding_whd,
TraversalStride const& stride_whd,
LowerSRTStride const& lower_srt,
DilationStride const& stride_srt)
{
auto cta_v_tile = make_identity_layout(product_each(shape(tensor_cwhdn))).compose(cta_tiler);
auto cta_t_tile = make_layout(multicast_size);
return detail::make_tma_copy_im2col(copy_op, tensor_cwhdn,
slayout, cta_t_tile, cta_v_tile,
lower_corner_whd, upper_corner_whd, lower_padding_whd, upper_padding_whd, stride_whd, lower_srt, stride_srt);
}
// Explicit default for multicast_size
template <class CopyOp,
class Engine0, class Layout0,
class SLayout,
class CTATiler,
class LowerCornerStride,
class UpperCornerStride,
class LowerPaddingStride,
class UpperPaddingStride,
class TraversalStride,
class LowerSRTStride,
class DilationStride>
CUTE_HOST_RTC
auto
make_im2col_tma_copy(CopyOp const& copy_op,
Tensor<Engine0, Layout0> const& tensor_cwhdn,
SLayout const& slayout,
CTATiler const& cta_tiler,
LowerCornerStride const& lower_corner_whd,
UpperCornerStride const& upper_corner_whd,
LowerPaddingStride const& lower_padding_whd,
UpperPaddingStride const& upper_padding_whd,
TraversalStride const& stride_whd,
LowerSRTStride const& lower_srt,
DilationStride const& stride_srt)
{
return make_im2col_tma_copy(copy_op, tensor_cwhdn, slayout, cta_tiler, Int<1>{},
lower_corner_whd, upper_corner_whd, lower_padding_whd, upper_padding_whd, stride_whd, lower_srt, stride_srt);
}
// Explicit default for cta_tiler and multicast_size
template <class CopyOp,
class Engine0, class Layout0,
class SLayout,
class LowerCornerStride,
class UpperCornerStride,
class LowerPaddingStride,
class UpperPaddingStride,
class TraversalStride,
class LowerSRTStride,
class DilationStride>
CUTE_HOST_RTC
auto
make_im2col_tma_copy(CopyOp const& copy_op,
Tensor<Engine0, Layout0> const& tensor_cwhdn,
SLayout const& slayout,
LowerCornerStride const& lower_corner_whd,
UpperCornerStride const& upper_corner_whd,
LowerPaddingStride const& lower_padding_whd,
UpperPaddingStride const& upper_padding_whd,
TraversalStride const& stride_whd,
LowerSRTStride const& lower_srt,
DilationStride const& stride_srt)
{
return make_im2col_tma_copy(copy_op, tensor_cwhdn, slayout, product_each(shape(slayout)), Int<1>{},
lower_corner_whd, upper_corner_whd, lower_padding_whd, upper_padding_whd, stride_whd, lower_srt, stride_srt);
}
// No offsets copy.
template <class CopyOp,
class Engine0, class Layout0,
class SLayout,
class CTATiler,
class MulticastSize>
CUTE_HOST_RTC
auto
make_im2col_tma_copy(CopyOp const& copy_op,
Tensor<Engine0, Layout0> const& tensor_cwhdn,
SLayout const& slayout,
CTATiler const& cta_tiler,
MulticastSize const& multicast_size)
{
auto cta_v_tile = make_identity_layout(product_each(shape(tensor_cwhdn))).compose(cta_tiler);
auto cta_t_tile = make_layout(multicast_size);
return detail::make_tma_copy_im2col(copy_op, tensor_cwhdn, slayout, cta_t_tile, cta_v_tile);
}
// Explicit default for multicast_size
template <class CopyOp,
class Engine0, class Layout0,
class SLayout,
class CTATiler>
CUTE_HOST_RTC
auto
make_im2col_tma_copy(CopyOp const& copy_op,
Tensor<Engine0, Layout0> const& tensor_cwhdn,
SLayout const& slayout,
CTATiler const& cta_tiler)
{
return make_im2col_tma_copy(copy_op, tensor_cwhdn, slayout, cta_tiler, Int<1>{});
}
// Explicit default for cta_tiler and multicast_size
template <class CopyOp,
class Engine0, class Layout0,
class SLayout>
CUTE_HOST_RTC
auto
make_im2col_tma_copy(CopyOp const& copy_op,
Tensor<Engine0, Layout0> const& tensor_cwhdn,
SLayout const& slayout)
{
return make_im2col_tma_copy(copy_op, tensor_cwhdn, slayout, product_each(shape(slayout)), Int<1>{});
}
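// Host-side construction sketch (illustration only; the tensor and layout
// arguments are hypothetical and assumed to exist already). The overload above
// defaults the CTA tiler to the smem shape and disables multicast.
#if 0
template <class GEngine, class GLayout, class SLayout>
CUTE_HOST_RTC auto
example_make_im2col_tma(Tensor<GEngine,GLayout> const& tensor_cwhdn,  // gmem activation tensor
                        SLayout                 const& smem_layout)   // CTA tile of smem
{
  // Returns a TiledCopy whose atom holds the Im2ColTmaDescriptor and the
  // coordinate (im2col) TMA tensor used to partition gmem.
  return make_im2col_tma_copy(SM90_TMA_LOAD_IM2COL{}, tensor_cwhdn, smem_layout);
}
#endif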
} // namespace cute
| include/cute/atom/copy_traits_sm90_im2col.hpp/0 | {
"file_path": "include/cute/atom/copy_traits_sm90_im2col.hpp",
"repo_id": "include",
"token_count": 17300
} | 13 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Portable bit field that supports byte and word straddling and can
           be used in unions to define parameters bit-wise.
*/
#pragma once
#include <cute/config.hpp>
#include <cute/numeric/numeric_types.hpp> // uint_bit_t
namespace cute
{
class dummy_type {};
template <uint32_t BitStart, uint32_t NumBits, class OtherValueType = dummy_type>
struct bit_field
{
static_assert(0 < NumBits && NumBits <= 64, "bit_fields with more than 64 bits are not supported.");
// value_type: Use the smallest value type that fits NumBits
static constexpr uint32_t value_type_bits = (NumBits <= 8) ? 8 :
(NumBits <= 16) ? 16 :
(NumBits <= 32) ? 32 : 64;
using value_type = cute::uint_bit_t<value_type_bits>;
// storage_type: Use the smallest storage_type that avoids boundary crossing
static constexpr uint32_t storage_type_bits = (BitStart / 8 == (BitStart + NumBits - 1) / 8) ? 8 :
(BitStart / 16 == (BitStart + NumBits - 1) / 16) ? 16 :
(BitStart / 32 == (BitStart + NumBits - 1) / 32) ? 32 : 64;
using storage_type = cute::uint_bit_t<storage_type_bits>;
static_assert(sizeof(OtherValueType) == sizeof(value_type) || is_same<OtherValueType,dummy_type>::value,
"sizeof(OtherValueType) must be same as sizeof(value_type).");
// Number of storage values needed: ceil_div(BitStart + NumBits, storage_type_bits)
static constexpr uint32_t N = (BitStart + NumBits + storage_type_bits - 1) / storage_type_bits;
// Index of storage value for BitStart
static constexpr uint32_t idx = BitStart / storage_type_bits;
// Bit of data_[idx] for BitStart
static constexpr uint32_t bit_lo = BitStart % storage_type_bits;
// Number of bits in data_[idx] used for NumBits if straddling, else 0
static constexpr uint32_t bit_hi = (idx + 1 < N) ? (storage_type_bits - bit_lo) : 0;
public:
// NumBits mask
static constexpr value_type mask = value_type(uint64_t(-1) >> (64u - NumBits));
// NumBits mask for BitStart
static constexpr storage_type mask_lo = storage_type(mask) << bit_lo;
// NumBits mask for leftover bits in data_[idx+1] if straddling, else 0
static constexpr storage_type mask_hi = (idx + 1 < N) ? (storage_type(mask) >> bit_hi) : 0;
storage_type data_[N];
// Get value
CUTE_HOST_DEVICE constexpr
value_type get() const {
storage_type result = (data_[idx] & mask_lo) >> bit_lo;
if constexpr (bit_hi != 0) {
result |= (data_[idx+1] & mask_hi) << bit_hi;
}
return static_cast<value_type>(result);
}
// Set value
CUTE_HOST_DEVICE constexpr
void set(value_type x) {
storage_type item = static_cast<storage_type>(x & mask);
data_[idx] = static_cast<storage_type>((data_[idx] & ~mask_lo) | (item << bit_lo));
if constexpr (bit_hi != 0) {
data_[idx+1] = static_cast<storage_type>((data_[idx+1] & ~mask_hi) | (item >> bit_hi));
}
}
// Assign value
CUTE_HOST_DEVICE constexpr
bit_field& operator=(value_type x) {
set(x);
return *this;
}
// Cast to value
CUTE_HOST_DEVICE constexpr
operator value_type () const {
return get();
}
// Assign OtherValueType
CUTE_HOST_DEVICE constexpr
bit_field& operator=(OtherValueType x) {
return *this = *reinterpret_cast<value_type*>(&x);
}
// Cast to OtherValueType
CUTE_HOST_DEVICE constexpr
operator OtherValueType () const {
value_type x = get();
return *reinterpret_cast<OtherValueType*>(&x);
}
};
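// Usage sketch (illustration only; the field widths and names below are made up).
// Several bit_field members laid over one raw word in a union give named access
// to packed parameter bits. A field that crosses a 64-bit boundary (e.g.
// bit_field<60,10>) is split transparently across two storage words
// (N == 2, bit_lo == 60, bit_hi == 4).
#if 0
union ExamplePackedParams {
  uint32_t raw;                     // full 32-bit view
  bit_field< 0, 12> offset;         // bits [ 0,12), 16-bit storage
  bit_field<12, 20> extent;         // bits [12,32), 32-bit storage
};
CUTE_HOST_DEVICE uint32_t
example_pack(uint16_t offset_bits, uint32_t extent_bits)
{
  ExamplePackedParams p = {};       // zero-initialize through the first (raw) member
  p.offset = offset_bits;           // set() masks to 12 bits and writes in place
  p.extent = extent_bits;           // masks to 20 bits; the low 12 bits of raw are preserved
  return p.raw;
}
#endif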
} // end namespace cute
| include/cute/container/bit_field.hpp/0 | {
"file_path": "include/cute/container/bit_field.hpp",
"repo_id": "include",
"token_count": 1951
} | 14 |
/***************************************************************************************************
* Copyright (c) 2024 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Sparse matrix multiply accumulate for SM89
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
#if (__CUDACC_VER_MAJOR__ > 12) || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 4)
# define CUTLASS_ARCH_SPARSE_MMA_SM89_SUPPORTED 1
#endif
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM89_SUPPORTED) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ == 890)
# define CUTLASS_ARCH_SPARSE_MMA_SM89_ENABLED
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = fe4m3 * fe4m3 + F32
template <typename Operator_>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
cutlass::float_e4m3_t,
layout::RowMajor,
cutlass::float_e4m3_t,
layout::ColumnMajor,
float,
layout::RowMajor,
Operator_,
SPFormatType::Thread> {
static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value ||
platform::is_same<Operator_, OpMultiplyAddFastAccum>::value,
"Invalid operator for SM89 FP8 instruction");
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = cutlass::float_e4m3_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<ElementA, 16>;
using ElementB = cutlass::float_e4m3_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<ElementB, 16>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<ElementC, 4>;
using FragmentE = uint32_t;
using Operator = Operator_;
using ArchTag = arch::Sm89;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM89_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.f32.e4m3.e4m3.f32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
}
else {
assert(0);
}
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
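// Device-side usage sketch (illustration only). The fragments are assumed to have
// been staged by higher-level warp/threadblock iterators; E carries the 2-bit
// structured-sparsity metadata for operand A, and only id2 == 0 is valid here
// (kMaxID2 == 1).
#if 0
using Fe4m3SparseMma = SparseMma<
    gemm::GemmShape<16, 8, 64>, 32,
    cutlass::float_e4m3_t, layout::RowMajor,
    cutlass::float_e4m3_t, layout::ColumnMajor,
    float, layout::RowMajor,
    OpMultiplyAdd, SPFormatType::Thread>;
CUTLASS_DEVICE void example_sparse_mma(typename Fe4m3SparseMma::FragmentA const& frag_A,
                                       typename Fe4m3SparseMma::FragmentB const& frag_B,
                                       typename Fe4m3SparseMma::FragmentC      & accum,
                                       uint32_t                                  meta_E)
{
  Fe4m3SparseMma mma;
  mma(accum, frag_A, frag_B, accum, meta_E, /*id2=*/0);
}
#endif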
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = fe4m3 * fe5m2 + F32
template <typename Operator_>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
cutlass::float_e4m3_t,
layout::RowMajor,
cutlass::float_e5m2_t,
layout::ColumnMajor,
float,
layout::RowMajor,
Operator_,
SPFormatType::Thread> {
static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value ||
platform::is_same<Operator_, OpMultiplyAddFastAccum>::value,
"Invalid operator for SM89 FP8 instruction");
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = cutlass::float_e4m3_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<ElementA, 16>;
using ElementB = cutlass::float_e5m2_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<ElementB, 16>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<ElementC, 4>;
using FragmentE = uint32_t;
using Operator = Operator_;
using ArchTag = arch::Sm89;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM89_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.f32.e4m3.e5m2.f32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
}
else {
assert(0);
}
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = fe5m2 * fe4m3 + F32
template <typename Operator_>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
cutlass::float_e5m2_t,
layout::RowMajor,
cutlass::float_e4m3_t,
layout::ColumnMajor,
float,
layout::RowMajor,
Operator_,
SPFormatType::Thread> {
static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value ||
platform::is_same<Operator_, OpMultiplyAddFastAccum>::value,
"Invalid operator for SM89 FP8 instruction");
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = cutlass::float_e5m2_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<ElementA, 16>;
using ElementB = cutlass::float_e4m3_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<ElementB, 16>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<ElementC, 4>;
using FragmentE = uint32_t;
using Operator = Operator_;
using ArchTag = arch::Sm89;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM89_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.f32.e5m2.e4m3.f32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
}
else {
assert(0);
}
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = fe5m2 * fe5m2 + F32
template <typename Operator_>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
cutlass::float_e5m2_t,
layout::RowMajor,
cutlass::float_e5m2_t,
layout::ColumnMajor,
float,
layout::RowMajor,
Operator_,
SPFormatType::Thread> {
static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value ||
platform::is_same<Operator_, OpMultiplyAddFastAccum>::value,
"Invalid operator for SM89 FP8 instruction");
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = cutlass::float_e5m2_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<ElementA, 16>;
using ElementB = cutlass::float_e5m2_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<ElementB, 16>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<ElementC, 4>;
using FragmentE = uint32_t;
using Operator = Operator_;
using ArchTag = arch::Sm89;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM89_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.f32.e5m2.e5m2.f32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
}
else {
assert(0);
}
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/arch/mma_sparse_sm89.h/0 | {
"file_path": "include/cutlass/arch/mma_sparse_sm89.h",
"repo_id": "include",
"token_count": 5036
} | 15 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Utilities for performing block-striped access (load, store, reduce) of trivially-copyable,
statically-sized array types to global memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/wmma_array.h"
#include "cutlass/functional.h"
#include "cutlass/complex.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
// AccessWidth
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes the maximal power-of-two that evenly divides the size of T, capped at Limit
template <
typename T,
int Limit>
struct AccessWidth
{
// Inductive case
template <
int ObjectBytes, /// Size of T in bytes
int AlignBytes, /// Template induction variable
bool IsAligned = /// Whether ObjectBytes is an even multiple of AlignBytes
((AlignBytes <= Limit) && (ObjectBytes % AlignBytes == 0))>
struct Detail
{
static const int value = Detail<ObjectBytes, AlignBytes * 2>::value;
};
// Base case (ObjectBytes is not an even multiple of AlignBytes)
template <
int ObjectBytes, /// Size of T in bytes
int AlignBytes> /// Template induction variable
struct Detail<ObjectBytes, AlignBytes, false>
{
static const int value = AlignBytes / 2;
};
/// The maximal power-of-two that evenly divides the size of T
static const int value = Detail<
(int) sizeof(T),
1>::value;
};
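// Compile-time sanity checks (illustration only): the widest power-of-two access
// that divides a 4-byte float is 4 bytes, while a 16-byte Array<float, 4> can be
// moved with a full 16-byte access.
#if 0
static_assert(AccessWidth<float, 16>::value == 4, "4B type -> 4B access");
static_assert(AccessWidth<Array<float, 4>, 16>::value == 16, "16B array -> 16B access");
#endif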
/////////////////////////////////////////////////////////////////////////////////////////////////
// StripedAccessType
/////////////////////////////////////////////////////////////////////////////////////////////////
/// ReinterpretCast type for striping a trivially-copyable type in global memory
/// (Default specialization. Striping granularity is type T.)
template <
typename T, /// Data type
int TransferBytes = /// Data access width (16 byte max for global memory access on current architectures)
AccessWidth<T, 16>::value>
struct alignas(TransferBytes) StripedAccessType : public T
{};
/// ReinterpretCast type for striping a trivially-copyable type in global memory
/// (Specialization for cutlass::Array<T>. Striping granularity is a multiple of T.)
template <
typename T, /// Array element type
int N, /// Number of elements in array
bool RegisterSized, /// T is register-sized
int TransferBytes> /// Data access width
struct StripedAccessType<
Array<T, N, RegisterSized>,
TransferBytes>
: public AlignedArray<
T, // Element type of StripedAccessType
__NV_STD_MAX(1, TransferBytes / (int) sizeof(T)), // Number of elements T in StripedAccessType
TransferBytes> // Alignment of StripedAccessType
{};
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
/// ReinterpretCast type for striping a trivially-copyable type in global memory
/// (Specialization for cutlass::WmmaFragmentArray<T>. Striping granularity is a multiple of T.)
template<
typename Use,
int m,
int n,
int k,
typename ElementT,
typename Layout,
int kFragments,
int TransferBytes>
struct StripedAccessType<
WmmaFragmentArray<nvcuda::wmma::fragment<Use, m, n, k, ElementT, Layout>, kFragments>,
TransferBytes>
: public AlignedArray<
ElementT,
__NV_STD_MAX(1, TransferBytes / (int) sizeof(ElementT)),
TransferBytes>
{};
#endif // if defined(CUTLASS_ARCH_WMMA_ENABLED)
/////////////////////////////////////////////////////////////////////////////////////////////////
// BlockStriped
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Utility for performing block-striped access (load, store) of trivially-copyable,
/// statically-sized array types to global memory
template <
int BlockThreads,
typename ArrayT,
typename AccessT = StripedAccessType<ArrayT> >
struct BlockStriped
{
/// Number of striped accesses
static const int kStripes = int(sizeof(ArrayT) / sizeof(AccessT));
static_assert(kStripes > 0, "AccessT type must be smaller than or equal to ArrayT type");
/// Load
CUTLASS_DEVICE
static void load(ArrayT &data, ArrayT *ptr, int thread_idx)
{
AccessT *access_input = reinterpret_cast<AccessT*>(ptr);
AccessT *access_data = reinterpret_cast<AccessT*>(&data);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kStripes; ++i) {
access_data[i] = access_input[(BlockThreads * i) + thread_idx];
}
}
/// Load & Add
CUTLASS_DEVICE
static void load_add(ArrayT &data, ArrayT *ptr, int thread_idx)
{
AccessT *access_input = reinterpret_cast<AccessT*>(ptr);
AccessT *access_data = reinterpret_cast<AccessT*>(&data);
plus<AccessT> add;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kStripes; ++i)
{
access_data[i] = add(access_data[i], access_input[(BlockThreads * i) + thread_idx]);
}
}
/// Store
CUTLASS_DEVICE
static void store(ArrayT *ptr, const ArrayT &data, int thread_idx)
{
AccessT *access_output = reinterpret_cast<AccessT*>(ptr);
const AccessT *access_data = reinterpret_cast<const AccessT*>(&data);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kStripes; ++i) {
access_output[(BlockThreads * i) + thread_idx] = access_data[i];
}
}
};
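// Usage sketch (illustration only; the fragment size, thread count, and workspace
// pointer are hypothetical). All BlockThreads threads pass the same workspace
// pointer, which must hold BlockThreads fragments laid out block-striped.
#if 0
using AccumFragment = Array<float, 64>;
using StripedAccum  = BlockStriped<128, AccumFragment>;    // 128 threads per CTA
CUTLASS_DEVICE void example_stage_accumulators(AccumFragment &frag,
                                               AccumFragment *workspace,  // gmem staging buffer
                                               int thread_idx)
{
  StripedAccum::store(workspace, frag, thread_idx);   // each thread writes kStripes pieces
  __syncthreads();
  StripedAccum::load(frag, workspace, thread_idx);    // and reads them back, striped
}
#endif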
/////////////////////////////////////////////////////////////////////////////////////////////////
// BlockStripedReduce
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Utility for performing block-striped access (load, store, reduce) of trivially-copyable,
/// statically-sized array types to global memory.
/// (Default specialization)
template <
int BlockThreads,
typename ArrayT,
typename ElementT = typename StripedAccessType<ArrayT>::Element>
struct BlockStripedReduce :
BlockStriped<
BlockThreads,
ArrayT,
ElementT>
{
/// Reduce
CUTLASS_DEVICE
static void reduce(ArrayT *ptr, const ArrayT &data, int thread_idx)
{
cutlass::atomic_add<ElementT> reduce;
ElementT *access_output = reinterpret_cast<ElementT*>(ptr);
const ElementT *access_data = reinterpret_cast<const ElementT*>(&data);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < BlockStripedReduce::kStripes; ++i) {
reduce(access_output + (BlockThreads * i) + thread_idx, access_data[i]);
}
}
};
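// Usage sketch (illustration only): accumulate this thread's fragment into a shared
// workspace with striped atomic adds, e.g. when several CTAs reduce into one tile.
#if 0
using Fragment      = Array<float, 64>;
using StripedReduce = BlockStripedReduce<128, Fragment>;
CUTLASS_DEVICE void example_reduce_fragment(Fragment const &frag,
                                            Fragment *workspace,   // holds 128 striped fragments
                                            int thread_idx)
{
  StripedReduce::reduce(workspace, frag, thread_idx);
}
#endif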
/// Utility for performing block-striped access (load, store, reduce) of trivially-copyable,
/// statically-sized array types to global memory.
/// (Specialization for half_t. Uses half2 vectorized-reduction.)
template <
int BlockThreads,
typename ArrayT>
struct BlockStripedReduce<BlockThreads, ArrayT, half_t> :
BlockStriped<
BlockThreads,
ArrayT,
half2>
{
static_assert(BlockStripedReduce::kStripes % 2 == 0, "Array of half_t must have an even number of elements");
/// Reduce
CUTLASS_DEVICE
static void reduce(ArrayT *ptr, const ArrayT &data, int thread_idx)
{
cutlass::atomic_add<half2> reduce;
half2 *access_output = reinterpret_cast<half2*>(ptr);
const half2 *access_data = reinterpret_cast<const half2*>(&data);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < BlockStripedReduce::kStripes; ++i)
{
reduce(access_output + (BlockThreads * i) + thread_idx, access_data[i]);
}
}
};
} // namespace cutlass
| include/cutlass/block_striped.h/0 | {
"file_path": "include/cutlass/block_striped.h",
"repo_id": "include",
"token_count": 3040
} | 16 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Template for device-level Implicit GEMM Convolution
*/
#pragma once
#include <limits>
#include "cutlass/cutlass.h"
#include "cutlass/device_kernel.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/cuda_host_adapter.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template<typename ImplicitGemmKernel_>
class ImplicitGemmConvolution {
public:
using UnderlyingKernel = ImplicitGemmKernel_;
using ElementA = typename UnderlyingKernel::ElementA;
using LayoutA = typename UnderlyingKernel::LayoutA;
using ElementB = typename UnderlyingKernel::ElementB;
using LayoutB = typename UnderlyingKernel::LayoutB;
using ElementC = typename UnderlyingKernel::ElementC;
using LayoutC = typename UnderlyingKernel::LayoutC;
using ElementAccumulator = typename UnderlyingKernel::ElementAccumulator;
using ElementCompute = typename UnderlyingKernel::ElementCompute;
using OperatorClass = typename UnderlyingKernel::OperatorClass;
using ArchTag = typename UnderlyingKernel::ArchTag;
using ThreadblockShape = typename UnderlyingKernel::ThreadblockShape;
using WarpShape = typename UnderlyingKernel::WarpShape;
using InstructionShape = typename UnderlyingKernel::InstructionShape;
using ThreadblockSwizzle = typename UnderlyingKernel::ThreadblockSwizzle;
using EpilogueOutputOp = typename UnderlyingKernel::EpilogueOutputOp;
static int const kStages = UnderlyingKernel::kStages;
static int const kConvDim = UnderlyingKernel::kConvDim;
using WarpMmaOperator = typename UnderlyingKernel::WarpMmaOperator;
using ArchMmaOperator = typename UnderlyingKernel::ArchMmaOperator;
using MathOperator = typename UnderlyingKernel::MathOperator;
static cutlass::conv::Operator const kConvolutionalOperator = UnderlyingKernel::kConvolutionalOperator;
static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = UnderlyingKernel::kIteratorAlgorithm;
static cutlass::conv::StrideSupport const kStrideSupport = UnderlyingKernel::kStrideSupport;
static cutlass::conv::GroupMode const kGroupMode = UnderlyingKernel::kGroupMode;
static bool const kEnableCudaHostAdapter = CUTLASS_ENABLE_CUDA_HOST_ADAPTER;
static int const kWarpCount =
(ThreadblockShape::kM / WarpShape::kM) *
(ThreadblockShape::kN / WarpShape::kN) *
(ThreadblockShape::kK / WarpShape::kK);
/// Argument structure
using Arguments = typename UnderlyingKernel::Arguments;
private:
/// Kernel parameters object
typename UnderlyingKernel::Params params_;
public:
/// Constructs Implicit GEMM
ImplicitGemmConvolution() { }
/// Determines whether the Implicit GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
// dispatch to iterators
Status status = UnderlyingKernel::Mma::IteratorA::can_implement(args.problem_size);
if (Status::kSuccess != status) {
return status;
}
status = UnderlyingKernel::Mma::IteratorB::can_implement(args.problem_size);
if (Status::kSuccess != status) {
return status;
}
// check group conv constraint
if (args.problem_size.groups != 1) {
if (kGroupMode == conv::GroupMode::kNone) {
return Status::kErrorInvalidProblem;
}
// C and K should each be a multiple of groups
if (args.problem_size.K % args.problem_size.groups ||
args.problem_size.C % args.problem_size.groups) {
return Status::kErrorInvalidProblem;
}
// split-k is not supported
if (args.problem_size.split_k_slices != 1) {
return Status::kErrorInvalidProblem;
}
int k_per_group = args.problem_size.K / args.problem_size.groups;
// k_per_group must be a multiple of ThreadblockShape::kN when one CTA computes one group
if (kGroupMode == conv::GroupMode::kSingleGroup && k_per_group % ThreadblockShape::kN) {
return Status::kErrorInvalidProblem;
}
// ThreadblockShape::kN must be divisible by k_per_group when one CTA computes multiple groups
if (kGroupMode == conv::GroupMode::kMultipleGroup && ThreadblockShape::kN % k_per_group) {
return Status::kErrorInvalidProblem;
}
// current optimized iterator algo only supports SingleGroup mode
if (kIteratorAlgorithm == IteratorAlgorithm::kOptimized &&
kGroupMode != conv::GroupMode::kSingleGroup) {
return Status::kErrorInvalidProblem;
}
}
static int const kAlignmentC = UnderlyingKernel::Epilogue::OutputTileIterator::kElementsPerAccess;
if (kConvolutionalOperator == conv::Operator::kFprop) {
if (args.problem_size.K % kAlignmentC)
return Status::kErrorMisalignedOperand;
} else if (kConvolutionalOperator == conv::Operator::kDgrad || kConvolutionalOperator == conv::Operator::kDeconv) {
if (args.problem_size.C % kAlignmentC)
return Status::kErrorMisalignedOperand;
} else if (kConvolutionalOperator == conv::Operator::kWgrad) {
if (args.problem_size.C % kAlignmentC)
return Status::kErrorMisalignedOperand;
}
// check for unsupported problem sizes for strided dgrad / deconv implementation
if ((kConvolutionalOperator == conv::Operator::kDgrad || kConvolutionalOperator == conv::Operator::kDeconv) &&
kStrideSupport == conv::StrideSupport::kStrided) {
// split-k (serial or parallel) is not supported for strided dgrad / deconv
if(args.problem_size.split_k_slices > 1) {
return Status::kErrorNotSupported;
}
// dilation > {1x1} is not supported for strided dgrad / deconv
if(args.problem_size.dilation_h > 1 || args.problem_size.dilation_w > 1) {
return Status::kErrorNotSupported;
}
}
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(
threadblock_swizzle.get_tiled_shape(
kConvolutionalOperator,
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.problem_size.split_k_slices));
if (!(grid.y <= std::numeric_limits<uint16_t>::max() &&
grid.z <= std::numeric_limits<uint16_t>::max())) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t workspace_bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
kConvolutionalOperator,
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.problem_size.split_k_slices);
if(args.split_k_mode == SplitKMode::kParallel) {
// Split-K parallel: CTAs in k-dimension write the partial results in a temporary workspace.
      // The user needs to call a reduction operator to obtain the final output tensor.
workspace_bytes =
sizeof(ElementAccumulator) *
size_t(cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, args.problem_size)) *
size_t(grid_tiled_shape.k());
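      // e.g. (illustrative): an fprop problem with an N x P x Q x K output of 1x56x56x64,
      // float accumulators and 4 k-slices reserves 1*56*56*64 * 4 * sizeof(float) bytes here.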
}
else if(args.split_k_mode == SplitKMode::kSerial && args.problem_size.split_k_slices > 1) {
      // Split-K serial: The user workspace is used to store the semaphore and serialize writing the
      // final reduced output to the user's output tensor.
workspace_bytes = sizeof(int) * size_t(grid_tiled_shape.m()) * size_t(grid_tiled_shape.n());
}
return workspace_bytes;
}
/// Initializes GEMM state from arguments.
Status initialize(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr) {
if (args.problem_size.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
cudaError_t status = cudaMemsetAsync(workspace, 0, get_workspace_size(args), stream);
if (status != cudaSuccess) {
return Status::kErrorInternal;
}
}
// initialize the params structure from the arguments
params_ = typename UnderlyingKernel::Params(
args,
static_cast<int *>(workspace)
);
if constexpr (kEnableCudaHostAdapter) {
CUTLASS_ASSERT(cuda_adapter);
return Status::kSuccess;
}
else {
int smem_size = int(sizeof(typename UnderlyingKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(cutlass::Kernel<UnderlyingKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
return Status::kSuccess;
}
  /// Updates GEMM state from arguments without reallocating the workspace.
Status update(Arguments const &args, void *workspace = nullptr) {
// update the params structure from the arguments
params_.ptr_A = args.ref_A.data();
params_.ptr_B = args.ref_B.data();
params_.ptr_C = args.ref_C.data();
params_.ptr_D = args.ref_D.data();
params_.output_op = args.output_op;
params_.semaphore = static_cast<int *>(workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(32 * kWarpCount, 1, 1);
int smem_size = int(sizeof(typename UnderlyingKernel::SharedStorage));
cutlass::Status launch_result = cutlass::Status::kSuccess ;
if constexpr (kEnableCudaHostAdapter) {
//
// Use the cuda host adapter
//
CUTLASS_ASSERT(cuda_adapter);
if (cuda_adapter) {
void* kernel_params[] = {¶ms_};
launch_result = cuda_adapter->launch(
grid, dim3(1,1,1), block, smem_size, stream, kernel_params, 0
);
}
else {
launch_result = Status::kErrorInternal;
}
}
else {
cutlass::Kernel<UnderlyingKernel><<<grid, block, smem_size, stream>>>(params_);
}
cudaError_t result = cudaGetLastError();
if (cudaSuccess == result && Status::kSuccess == launch_result) {
return Status::kSuccess;
}
else {
CUTLASS_TRACE_HOST(" Kernel launch failed. Reason: " << result);
return Status::kErrorInternal;
}
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) {
return run(stream, cuda_adapter);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) {
Status status = initialize(args, workspace, stream, cuda_adapter);
if (status == Status::kSuccess) {
status = run(stream, cuda_adapter);
}
return status;
}
};
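// Illustrative host-side usage sketch (added for clarity; not part of the original header).
// `ConvKernel` is a placeholder for a concrete implicit-GEMM convolution kernel type, and the
// exact Arguments constructor depends on that kernel, so the construction below is hypothetical.
//
//   using Conv = cutlass::conv::device::ImplicitGemmConvolution<ConvKernel>;
//   Conv conv_op;
//   typename Conv::Arguments args(/* problem size, tensor refs, epilogue params ... */);
//   if (Conv::can_implement(args) == cutlass::Status::kSuccess) {
//     cutlass::device_memory::allocation<uint8_t> workspace(Conv::get_workspace_size(args));
//     if (conv_op.initialize(args, workspace.get()) == cutlass::Status::kSuccess) {
//       conv_op();  // launches the kernel on the default CUDA stream
//     }
//   }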
/////////////////////////////////////////////////////////////////////////////////////////////////
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/device/implicit_gemm_convolution.h/0 | {
"file_path": "include/cutlass/conv/device/implicit_gemm_convolution.h",
"repo_id": "include",
"token_count": 4688
} | 17 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kUnity,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dDgradFilterTileAccessIteratorAnalytic;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradFilterTileAccessIteratorAnalytic for strided dgrad needs special handling to skip MMAs
// on non-contributing filter (w) positions
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradFilterTileAccessIteratorAnalytic <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kStrided,
AccessType_
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static_assert(sizeof_bits<Element>::value >= 8,
"DGRAD requires elements of size 8b or larger.");
//
// Parameters structure
//
using Params = Conv2dAnalyticParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
  // For a fixed filter position (r,s), find and fill offset_k_ and offset_c_ in the strided and contiguous dimensions
int filter_r_;
int filter_s_;
int start_r_;
int start_s_;
int offset_k_[ThreadMap::Iterations::kStrided];
int offset_c_[ThreadMap::Iterations::kContiguous];
public:
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorAnalytic(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_r_(start_r),
filter_s_(start_s),
start_r_(start_r),
start_s_(start_s) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
offset_c_[c] = threadblock_offset.column() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
}
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] =
threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
}
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
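  /// Advances to the next contributing filter position. For strided dgrad, only filter taps
  /// congruent to (start_r, start_s) modulo the convolution stride contribute to the current
  /// output pixels, so r and s are stepped by stride_h / stride_w; once all such taps are
  /// visited, the K offsets move to the next GEMM-K tile.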
CUTLASS_HOST_DEVICE
void advance() {
// Moves filter_s
filter_s_ += problem_size_.stride_w;
if (filter_s_ < problem_size_.S) {
return;
}
// Restore filter_s
filter_s_ = start_s_;
// Move filter_r
filter_r_ += problem_size_.stride_h;
if (filter_r_ < problem_size_.R) {
return;
}
// Restore filter_r
filter_r_ = start_r_;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] += Shape::kRow * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the filter tensor w that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int k = offset_k_[iteration_strided_];
int c = offset_c_[iteration_contiguous_] + iteration_vector_ * AccessType::kElements;
return TensorCoord(k, filter_r_, filter_s_, c);
}
/// Returns true if the current coordinate is within the filter tensor w
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.K && coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorAnalytic &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradFilterTileAccessIteratorAnalytic for unit-stride dgrad is more performant for dgrad
// problems with stride = {1x1}
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradFilterTileAccessIteratorAnalytic <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kUnity,
AccessType_
>{
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kUnity;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static_assert(sizeof_bits<Element>::value >= 8,
"DGRAD requires elements of size 8b or larger.");
//
// Parameters structure
//
using Params = Conv2dAnalyticParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
  // For a fixed filter position (r,s), find and fill offset_k_ and offset_c_ in the strided and contiguous dimensions
int filter_r_;
int filter_s_;
int offset_k_[ThreadMap::Iterations::kStrided];
int offset_c_[ThreadMap::Iterations::kContiguous];
public:
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorAnalytic(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
offset_c_[c] = threadblock_offset.column() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
}
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] =
threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
}
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
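  /// Traverses filter positions in (s, r) order; after the last position (R-1, S-1) is
  /// visited, the K offsets advance to the next GEMM-K tile.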
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] += Shape::kRow * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the filter tensor w that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int k = offset_k_[iteration_strided_];
int c = offset_c_[iteration_contiguous_] + iteration_vector_ * AccessType::kElements;
return TensorCoord(k, filter_r_, filter_s_, c);
}
/// Returns true if the current coordinate is within the filter tensor w
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.K && coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorAnalytic &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_analytic.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_analytic.h",
"repo_id": "include",
"token_count": 5132
} | 18 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dWgradOutputGradientTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static_assert(sizeof_bits<Element>::value >= 8,
"WGRAD requires elements of size 8b or greater.");
//
// Parameters structure
//
using Params = Conv2dAnalyticParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
int filter_k_[ThreadMap::Iterations::kContiguous];
int offset_npq_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv2dWgradOutputGradientTileAccessIteratorAnalytic(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
// initialize filter_k for every contiguous iteration
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
filter_k_[c] = threadblock_offset.row() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
}
// initialize n, p, q offset for every strided iteration
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_npq_[s] = threadblock_offset.column() + thread_coord.strided()
+ s * ThreadMap::Delta::kStrided;
}
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next GEMM-K offset (offset_npq_) in GEMM-A by a CTA-K tile
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_npq_[s] += Shape::kColumn * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the output gradient tensor Dy that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int npq = offset_npq_[iteration_strided_];
int n = npq / (problem_size_.P * problem_size_.Q);
int residual = npq % (problem_size_.P * problem_size_.Q);
int p = residual / problem_size_.Q;
int q = residual % problem_size_.Q;
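    // e.g. (illustrative): with P = Q = 56, npq = 3211 decomposes into n = 1, p = 1, q = 19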
int k = filter_k_[iteration_contiguous_] + iteration_vector_ * AccessType::kElements;
return TensorCoord(n, p, q, k);
}
/// Returns true if the current coordinate is within the output gradient tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N &&
coord.h() < problem_size_.P &&
coord.w() < problem_size_.Q &&
coord.c() < problem_size_.K;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dWgradOutputGradientTileAccessIteratorAnalytic &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv2d_wgrad_output_gradient_tile_access_iterator_analytic.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv2d_wgrad_output_gradient_tile_access_iterator_analytic.h",
"repo_id": "include",
"token_count": 2897
} | 19 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/threadblock/depthwise_direct_conv_params.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Shape_,
typename OutputTileShape_,
typename StrideShape_,
typename DilationShape_,
typename ActivationShape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess> >
class DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation {
public:
//
// Types
//
using Shape = Shape_;
using OutputTileShape = OutputTileShape_;
using Element = Element_;
using Layout = Layout_;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
  // Compile-time values of the stride, dilation and activation shapes
using StrideShape = StrideShape_;
using DilationShape = DilationShape_;
using ActivationShape = ActivationShape_;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static int const kActivationSize = ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess * ThreadMap::kThreads *
sizeof_bits<Element>::value / 8;
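  // e.g. (illustrative): 4 iterations of 8 fp16 elements across 128 threads stage
  // 4 * 8 * 128 * 2 = 8192 bytes of activations per threadblock.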
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1, "Require Iterations::kContiguous == 1");
static_assert(OutputTileShape::kN == 1, "Require OutputTileShape::kN == 1");
static_assert(OutputTileShape::kC == Shape::kColumn, "Require OutputTile shape == channels per threadblock");
//
// Parameters structure
//
using Params = Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams<Layout>;
private:
Conv2dProblemSize const &problem_size_;
Params const ¶ms_;
char const *pointer_;
// Base channels for current threadblock
int base_c_;
// Base activation index for current threadblock
int offset_intial_npq_;
// Base activation coord for current threadblock
TensorCoord activatioin_base_;
  // Initial thread position
  int offset_initial_hwc_;
  // Overall number of load instructions per thread
  int iterator_load_;
  // Thread loading position
  int iterator_hwc_;
  // Whether the activation batch index N is inside the tensor
bool valid_n_;
public:
CUTLASS_HOST_DEVICE
DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset =
MatrixCoord()
)
: params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
offset_intial_npq_(threadblock_offset.row()),
offset_initial_hwc_(thread_idx),
iterator_load_(0) {
base_c_ = threadblock_offset.column();
set_iteration_index(0);
set_activation_coord(offset_intial_npq_);
}
CUTLASS_HOST_DEVICE
void set_activation_coord(int offset_npq) {
int offset_inital_n, offset_inital_p, offset_inital_q;
int residual;
params_.pq_divmod(offset_inital_n, residual, offset_npq);
params_.q_divmod(offset_inital_p, offset_inital_q, residual);
int base_n = offset_inital_n;
int base_h =
offset_inital_p * OutputTileShape::kH * StrideShape::kRow - problem_size_.pad_h;
int base_w =
offset_inital_q * OutputTileShape::kW * StrideShape::kColumn - problem_size_.pad_w;
activatioin_base_ = TensorCoord(base_n, base_h, base_w, base_c_);
valid_n_ = activatioin_base_.n() < problem_size_.N;
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(
problem_size,
layout,
{Shape::kRow, Shape::kColumn},
{OutputTileShape::kN, OutputTileShape::kH, OutputTileShape::kW, OutputTileShape::kC},
kActivationSize);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iterator_hwc_ = offset_initial_hwc_ + index * ThreadMap::kThreads;
iterator_load_ = index;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// Go to next threadblock
offset_intial_npq_ += problem_size_.split_k_slices;
set_iteration_index(0);
set_activation_coord(offset_intial_npq_);
}
/// Returns the coordinate in the activations tensor X that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
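    // Decompose the linear per-thread offset into (h, w, c): the channel vector index varies
    // fastest, then the activation column w, then the activation row h.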
    int c = iterator_hwc_ % ThreadMap::Detail::ShapeVec::kContiguous;
    int next = iterator_hwc_ / ThreadMap::Detail::ShapeVec::kContiguous;
int h = next / ActivationShape::kW;
int w = next % ActivationShape::kW;
c = c * AccessType::kElements;
return activatioin_base_ + TensorCoord(0, h, w, c);
}
/// Returns true if the current coordinate is within the activations tensor X
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
bool valid_c = coord.c() < problem_size_.C;
bool valid_h = coord.h() >= 0 && coord.h() < problem_size_.H;
bool valid_w = coord.w() >= 0 && coord.w() < problem_size_.W;
return valid_n_ ? valid_c & valid_h & valid_w : 0;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
AccessType const *ptr =
reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
return ptr;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation &operator++() {
++iterator_load_;
iterator_hwc_ += ThreadMap::kThreads;
if (iterator_load_ < ThreadMap::Iterations::kCount) {
return *this;
}
iterator_load_ = 0;
iterator_hwc_ = offset_initial_hwc_;
return *this;
}
/// Determines the activation size loaded by iterator
CUTLASS_HOST_DEVICE
int get_load_size() {
return kActivationSize;
}
/// Determines the iterations needed
CUTLASS_HOST_DEVICE
int get_iteration_num() {
return ThreadMap::Iterations::kCount;
}
/// Determines whether the Depthwise fprop can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check stride and dilation constraint
if (problem_size.stride_h != StrideShape::kRow || problem_size.stride_w != StrideShape::kColumn) {
return Status::kErrorInvalidProblem;
}
if (problem_size.dilation_h != DilationShape::kRow || problem_size.dilation_w != DilationShape::kColumn) {
return Status::kErrorInvalidProblem;
}
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_fixed_stride_dilation.h/0 | {
"file_path": "include/cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_fixed_stride_dilation.h",
"repo_id": "include",
"token_count": 3553
} | 20 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level per channel scale+bias+relu before
matrix multiply-accumulate operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename FragmentActivations, typename FragmentScaleBias>
struct FpropScaleBiasReluTransform {
using T = typename FragmentActivations::Element;
static int const NumActivations = FragmentActivations::kElements;
static int const NumScaleBias = FragmentScaleBias::kElements;
static int const MmaElements = 2;
// One element has one scale and one bias
static int const MmaScaleBiasPair = 2;
// 16816 has 2 columns
static int const MmaCols = 2;
using MmaOperand = Array<T, MmaElements>;
using ScaleBiasOperand = Array<T, MmaElements * MmaScaleBiasPair>;
CUTLASS_DEVICE
void transform(MmaOperand &activations, ScaleBiasOperand const &scale_bias) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
uint32_t *ptr_activations = reinterpret_cast<uint32_t *>(&activations);
uint32_t const *ptr_scale_bias = reinterpret_cast<uint32_t const *>(&scale_bias);
// Apply per channel scale+bias+relu if the data is not a special NaN
// (0x7eff). If it is a special NaN (0x7eff), hard code the output to 0.
    // We assume the pair of FP16 values is either both in-bound or both out-of-bound,
    // which requires C to be an even number.
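    // Scalar sketch of the PTX below (illustrative): with s = scale pair, b = bias pair and
    // x = activation pair, it computes y = max(s * x + b, 0) per half, and forces y = 0
    // when the packed pair x equals the special out-of-bound NaN pattern.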
asm volatile(
"{\n\t"
" .reg .pred %%p;\n\t"
" .reg .b32 t1;\n\t"
" setp.eq.u32 %%p, %2, %4;\n\t"
" fma.rn.f16x2.relu t1, %1, %2, %3;\n"
" selp.u32 %0, 0, t1, %%p;\n\t"
"}\n"
: "=r"(ptr_activations[0])
: "r"(ptr_scale_bias[0]), "r"(ptr_activations[0]),
"r"(ptr_scale_bias[1]), "n"(cutlass::arch::OOB_NAN_F16x2));
#else
assert(0);
#endif
}
CUTLASS_DEVICE
void operator()(FragmentActivations &activations,
FragmentScaleBias const &scale_bias) {
MmaOperand *ptr_activations = reinterpret_cast<MmaOperand *>(&activations);
ScaleBiasOperand const *ptr_scale_bias =
reinterpret_cast<ScaleBiasOperand const *>(&scale_bias);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < (NumActivations / MmaElements); ++i) {
transform(ptr_activations[i], ptr_scale_bias[(i / MmaScaleBiasPair) % MmaCols]);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename FragmentActivations, typename FragmentScaleBias>
struct WgradScaleBiasReluTransform {
using T = typename FragmentActivations::Element;
static int const NumActivations = FragmentActivations::kElements;
static int const NumScaleBias = FragmentScaleBias::kElements;
static int const MmaElements = 2;
// One element has one scale and one bias
static int const MmaScaleBiasPair = 2;
// 16816 has 2 rows
static int const MmaRows = 2;
using MmaOperand = Array<T, MmaElements>;
using ScaleBiasOperand = Array<__half2, MmaScaleBiasPair>;
CUTLASS_DEVICE
void transform(MmaOperand &activations, ScaleBiasOperand const &scale_bias) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
__half2 *ptr_activations = reinterpret_cast<__half2 *>(&activations);
uint32_t const *ptr_scale_bias = reinterpret_cast<uint32_t const *>(&scale_bias);
#if 1
// CUDA + PTX version
bool h1_oob = (reinterpret_cast<uint16_t &>(ptr_activations[0].x) == cutlass::arch::OOB_NAN_F16);
bool h2_oob = (reinterpret_cast<uint16_t &>(ptr_activations[0].y) == cutlass::arch::OOB_NAN_F16);
// Apply per channel scale+bias+relu if the data is not a special NaN
// (0x7eff). If it is a special NaN (0x7eff), hard code the output to 0.
    // We cannot guarantee that the pair of F16 values are both in-bound or both
    // out-of-bound because C x R x S can be an odd number.
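    // After the fused multiply-add below, each half is zeroed independently: h1_oob clears
    // the low 16 bits and h2_oob clears the high 16 bits of the packed result.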
asm volatile(
"{\n\t"
" fma.rn.f16x2.relu %0, %1, %2, %3;\n"
"}"
: "=r"(reinterpret_cast<uint32_t &>(ptr_activations[0]))
: "r"(ptr_scale_bias[0]), "r"(reinterpret_cast<uint32_t &>(ptr_activations[0])),
"r"(ptr_scale_bias[1]));
reinterpret_cast<uint32_t &>(ptr_activations[0]) = h1_oob ?
(reinterpret_cast<uint32_t &>(ptr_activations[0]) & 0xffff0000) :
reinterpret_cast<uint32_t &>(ptr_activations[0]);
reinterpret_cast<uint32_t &>(ptr_activations[0]) = h2_oob ?
(reinterpret_cast<uint32_t &>(ptr_activations[0]) & 0xffff) :
reinterpret_cast<uint32_t &>(ptr_activations[0]);
#else
// pure PTX version
// Apply per channel scale+bias+relu if the data is not a special NaN
// (0x7eff). If it is a special NaN (0x7eff), hard code the output to 0.
asm volatile(
"{\n"
" .reg .b16 t1, t2;\n"
" .reg .b32 t3, t4, t5, t6;\n"
" .reg .pred p1, p2;\n"
" mov.b32 {t1, t2}, %2;\n"
" setp.eq.s16 p1, t1, %4;\n"
" setp.eq.s16 p2, t2, %4;\n"
" fma.rn.f16x2.relu t3, %1, %2, %3;\n"
" and.b32 t4, t3, %5;\n"
" selp.b32 t5, t4, t3, p1;\n"
" and.b32 t6, t5, %6;\n"
" selp.b32 %0, t6, t5, p2;\n"
"}\n"
: "=r"(reinterpret_cast<uint32_t &>(ptr_activations[0]))
: "r"(ptr_scale_bias[0]), "r"(reinterpret_cast<uint32_t &>(ptr_activations[0])),
"r"(ptr_scale_bias[1]), "n"(cutlass::arch::OOB_NAN_F16), "n"(0xffff0000), "n"(0x0000ffff));
#endif
#else
assert(0);
#endif
}
CUTLASS_DEVICE
void operator()(FragmentActivations &activations,
FragmentScaleBias const &scale_bias) {
MmaOperand *ptr_activations = reinterpret_cast<MmaOperand *>(&activations);
ScaleBiasOperand const *ptr_scale_bias =
reinterpret_cast<ScaleBiasOperand const *>(&scale_bias);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < (NumActivations / MmaElements); ++i) {
transform(ptr_activations[i], ptr_scale_bias[(i / MmaRows)]);
}
}
};
} // namespace warp
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/warp/scale_bias_relu_transform.h/0 | {
"file_path": "include/cutlass/conv/warp/scale_bias_relu_transform.h",
"repo_id": "include",
"token_count": 3357
} | 21 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing elementwise operations used by epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/epilogue/collective/detail.hpp"
#include "cute/tensor.hpp"
#include "cute/numeric/numeric_types.hpp"
#include "cutlass/trace.h"
#include "cutlass/cuda_host_adapter.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace collective {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Applies an element wise operation to all elements within the fragment
// and writes them out to destination storage.
template <
class StrideC_,
class StrideD_,
class ThreadEpilogueOp_,
class EpilogueSchedule_
>
class DefaultEpilogueArray {
public:
//
// Type Aliases
//
using EpilogueSchedule = EpilogueSchedule_;
using DispatchPolicy = EpilogueSchedule_;
// derived types of output thread level operator
using ThreadEpilogueOp = ThreadEpilogueOp_;
using ElementOutput = typename ThreadEpilogueOp::ElementOutput;
using ElementAccumulator = typename ThreadEpilogueOp::ElementAccumulator;
using ElementCompute = typename ThreadEpilogueOp::ElementCompute;
using ElementScalar = ElementCompute;
using ElementC = typename ThreadEpilogueOp::ElementC;
using StrideC = StrideC_;
using InternalStrideC = cute::remove_pointer_t<StrideC>;
using ElementD = typename ThreadEpilogueOp::ElementD;
using StrideD = StrideD_;
using InternalStrideD = cute::remove_pointer_t<StrideD>;
using GmemTiledCopyC = void;
using GmemTiledCopyD = void;
static const int kOutputAlignment = ThreadEpilogueOp::kCount;
using AlignmentType = typename cute::uint_bit<sizeof_bits<ElementOutput>::value * kOutputAlignment>::type;
static_assert(cute::is_same_v<EpilogueSchedule, PtrArrayNoSmemWarpSpecialized> || cute::is_same_v<EpilogueSchedule, PtrArrayDefault>, "Incompatible epilogue schedule.");
static_assert(rank(InternalStrideC{}) == 3, "StrideCD must be rank-3: [M, N, L]");
static_assert(rank(InternalStrideD{}) == 3, "StrideCD must be rank-3: [M, N, L]");
struct SharedStorage { };
using TensorMapStorage = SharedStorage;
// Host side epilogue arguments
struct Arguments {
typename ThreadEpilogueOp::Params thread{};
ElementC const** ptr_C = nullptr;
StrideC dC{};
ElementD** ptr_D = nullptr;
StrideD dD{};
};
// Device side epilogue params
using Params = Arguments;
//
// Methods
//
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(
ProblemShape const&,
Arguments const& args,
[[maybe_unused]] void* workspace) {
return args;
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args, int sm_count) {
return 0;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
return cutlass::Status::kSuccess;
}
template<class ProblemShape>
static bool
can_implement(
[[maybe_unused]] ProblemShape const& problem_shape,
[[maybe_unused]] Arguments const& args) {
return true;
}
CUTLASS_HOST_DEVICE
DefaultEpilogueArray(Params const& params_)
: params(params_) { }
CUTLASS_DEVICE
bool
is_source_needed() {
// For Ptr-Array or Grouped Gemm we cannot determine if source is needed based on first beta.
return true;
}
template<
class ProblemShapeMNKL,
class BlockShapeMNK,
class BlockCoordMNKL,
class FrgEngine, class FrgLayout,
class TiledMma,
class ResidueMNK
>
CUTLASS_HOST_DEVICE void
operator()(
ProblemShapeMNKL problem_shape_mnkl,
BlockShapeMNK blk_shape_MNK,
BlockCoordMNKL blk_coord_mnkl,
cute::Tensor<FrgEngine, FrgLayout> const& accumulators,
TiledMma tiled_mma,
ResidueMNK residue_mnk,
int thread_idx,
[[maybe_unused]] char* smem_buf)
{
using namespace cute;
using X = Underscore;
static_assert(rank(ProblemShapeMNKL{}) == 4, "ProblemShapeMNKL must be rank 4");
static_assert(is_static<BlockShapeMNK>::value, "ThreadBlock tile shape must be static");
static_assert(rank(BlockShapeMNK{}) == 3, "BlockShapeMNK must be rank 3");
    static_assert(rank(BlockCoordMNKL{}) == 4, "BlockCoordMNKL must be rank 4");
// Separate out problem shape for convenience
auto M = get<0>(problem_shape_mnkl);
auto N = get<1>(problem_shape_mnkl);
auto L = get<3>(problem_shape_mnkl);
// Batches are managed by using appropriate pointers to C and D matrices
const int32_t mock_L = 1;
const int32_t mock_l_coord = 0;
// Slice to get the tile this CTA is responsible for
auto [m_coord, n_coord, k_coord, l_coord] = blk_coord_mnkl;
// If scalar alpha/beta are provided, i.e., same alpha/beta applies to all batches/groups.
// If pointers to alpha/beta are provided, i.e., alpha/beta can differ between batches/groups,
// we get the correct alpha/beta values for the current batch/group using group index.
ThreadEpilogueOp epilogue_op = ThreadEpilogueOp(params.thread, l_coord);
if (epilogue_op.is_source_needed() && params.dC == nullptr) {
// Beta value is non-zero while pointer to C is a nullptr
assert(0);
}
InternalStrideC stride_c;
InternalStrideD stride_d;
if constexpr (!cute::is_same_v<InternalStrideC, StrideC>) {
// If grouped gemm
if (epilogue_op.is_source_needed()) {
stride_c = detail::get_epilogue_stride<EpilogueSchedule>(params.dC[l_coord]);
}
stride_d = detail::get_epilogue_stride<EpilogueSchedule>(params.dD[l_coord]);
}
else {
stride_c = detail::get_epilogue_stride<EpilogueSchedule>(params.dC);
stride_d = detail::get_epilogue_stride<EpilogueSchedule>(params.dD);
}
// Represent the full output tensor
ElementC const* ptr_C_l = nullptr;
if (epilogue_op.is_source_needed()) {
ptr_C_l = params.ptr_C[l_coord];
}
Tensor mC_mnl = make_tensor(make_gmem_ptr(ptr_C_l), make_shape(M,N,mock_L), stride_c); // (m,n,l)
Tensor mD_mnl = make_tensor(make_gmem_ptr(params.ptr_D[l_coord]), make_shape(M,N,mock_L), stride_d); // (m,n,l)
Tensor gC_mnl = local_tile(mC_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l)
Tensor gD_mnl = local_tile(mD_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l)
Tensor gC = gC_mnl(_,_,m_coord,n_coord, mock_l_coord); // (BLK_M,BLK_N)
Tensor gD = gD_mnl(_,_,m_coord,n_coord, mock_l_coord); // (BLK_M,BLK_N)
// Partition source and destination tiles to match the accumulator partitioning
auto thr_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCgD = thr_mma.partition_C(gD); // (VEC,THR_M,THR_N)
Tensor tCgC = thr_mma.partition_C(gC); // (VEC,THR_M,THR_N)
static_assert(is_static<FrgLayout>::value, "Accumulator layout must be static");
CUTE_STATIC_ASSERT_V(size(tCgC) == size(tCgD),
"Source and destination must have the same number of elements.");
CUTE_STATIC_ASSERT_V(size(tCgD) == size(accumulators),
"Accumulator count must have the same destination element count.");
// Make an identity coordinate tensor for predicating our output MN tile
auto cD = make_identity_tensor(make_shape(unwrap(shape<0>(gD)), unwrap(shape<1>(gD))));
Tensor tCcD = thr_mma.partition_C(cD);
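    // cD carries the (m, n) coordinate of every element owned by this thread; comparing it
    // against residue_mnk below predicates off out-of-bounds elements of partial tiles.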
// source is needed
if (epilogue_op.is_source_needed()) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size(accumulators); ++i) {
if (elem_less(tCcD(i), make_coord(get<0>(residue_mnk), get<1>(residue_mnk)))) {
tCgD(i) = epilogue_op(accumulators(i), tCgC(i));
}
}
}
// source is not needed, avoid load
else {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size(accumulators); ++i) {
if (elem_less(tCcD(i), make_coord(get<0>(residue_mnk), get<1>(residue_mnk)))) {
tCgD(i) = epilogue_op(accumulators(i));
}
}
}
}
private:
Params params;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace collective
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
// ---- end of file: include/cutlass/epilogue/collective/default_epilogue_array.hpp ----
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing conversion operations used by epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Converts the result without other operations
///
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
class Convert {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementAccumulator_;
static int const kCount = Count;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using ComputeFragment = FragmentAccumulator;
static FloatRoundStyle const kRound = Round;
static bool const kIsHeavy = false;
/// Host-constructable parameters structure
struct Params {
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() {}
};
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
Convert(Params const ¶ms = Params()) {
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
}
/// Returns true if source is needed based on state of runtime arguments
CUTLASS_HOST_DEVICE
constexpr bool is_source_needed() const {
return false;
}
/// Constexpr function to enable the compiler to optimize away the source loading if it is
/// never needed.
CUTLASS_HOST_DEVICE
constexpr bool is_source_ever_needed() const {
return false;
}
  /// Converts the accumulator to the output type; no scaling or source accumulation is applied
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source = FragmentOutput(),
ElementCompute uniform = ElementCompute(0)) const {
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementAccumulator, kCount, Round> destination_converter;
return destination_converter(accumulator);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
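// Illustrative host-side usage sketch (not part of the original header; it relies only on the
// includes already pulled in above). It shows the Convert functor rounding a float accumulator
// fragment to half precision. The free function and its name are assumptions for this example.
inline void convert_fragment_example() {
  using ConvertOp = cutlass::epilogue::thread::Convert<cutlass::half_t, 4, float>;
  ConvertOp convert_op;                        // default Params: the functor holds no state
  cutlass::Array<float, 4> accum;
  accum.fill(1.25f);
  // The source fragment is ignored: is_source_needed() is constexpr false for Convert.
  cutlass::Array<cutlass::half_t, 4> out = convert_op(accum);
  (void)out;
}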
// ---- end of file: include/cutlass/epilogue/thread/conversion_op.h ----
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Default configuration for epilogue computing absolute maximum of output and auxiliary outputs.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/threadblock/epilogue_with_absmax.h"
#include "cutlass/layout/permute.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for absolute-maximum-computing epilogues with TensorOps
template <
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename ElementOutput,
typename ElementAuxOutput,
typename ElementVector,
typename OutputOp,
int ElementsPerAccess,
bool ScatterD = false,
typename PermuteDLayout = layout::NoPermute
>
struct DefaultEpilogueWithAbsMax {
/// Use defaults related to the existing epilogue
using Base = DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp,
ElementsPerAccess
>;
//
// Stores the output
//
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
typename Base::OutputTileThreadMap,
ElementOutput,
ScatterD,
PermuteDLayout
>;
//
// Stores the auxiliary output
//
using AuxOutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
typename Base::OutputTileThreadMap,
ElementAuxOutput,
ScatterD,
PermuteDLayout
>;
/// Define the epilogue
using Epilogue = EpilogueWithAbsMax<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputTileIterator,
AuxOutputTileIterator,
ElementVector,
typename Base::AccumulatorFragmentIterator,
typename Base::WarpTileIterator,
typename Base::SharedLoadIterator,
OutputOp,
typename Base::Padding,
Base::kFragmentsPerIteration
>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
// ---- end of file: include/cutlass/epilogue/threadblock/default_epilogue_with_absmax.h ----
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Epilogue for threadblock scoped GEMM/CONV that stores the accumulator in shared memory
    after applying a scale and bias loaded from global memory, together with element-wise operations.
    This epilogue is typically used in fused GEMM/CONV kernels to stage the intermediate accumulator.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator
template <
typename SmemTileIterator_, ///< Shared memory Tile iterator to output to shared memory
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename ScaleBiasIterator_, ///< Iterator to load scale and bias from global memory
typename OutputOp_ ///< Output operator
>
class EpilogueSmemAccumulator {
public:
using SmemTileIterator = SmemTileIterator_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using ScaleBiasIterator = ScaleBiasIterator_;
using OutputOp = OutputOp_;
/// Fragment of accumulator tile
using FragmentAccumulator = typename AccumulatorFragmentIterator::Fragment;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile;
/// Fragment of Scale and Bias loaded from global memory
using FragmentScaleBias = typename ScaleBiasIterator::Fragment;
static const bool PerChannelScale = (OutputOp::kScale ==
epilogue::thread::ScaleType::OnlyAlphaPerChannelScaling);
/// Constructor
CUTLASS_DEVICE
EpilogueSmemAccumulator() {}
/// Streams the result to shared memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
SmemTileIterator smem_iterator, ///< Tile iterator for destination in shared memory
AccumulatorTile const &accumulator, ///< Complete warp-level accumulator tile
ScaleBiasIterator scale_iterator, ///< iterator for scale vector in global memory
ScaleBiasIterator bias_iterator) { ///< iterator for bias vector in global memory
// Fragment to load scale bias from global memory
FragmentScaleBias tb_frag_scale;
FragmentScaleBias tb_frag_bias;
/// Fragment Iterator to load slice of accumulator tile
AccumulatorFragmentIterator frag_iterator_accum(accumulator);
FragmentAccumulator tb_frag_accum;
/// Epilogue output fragment
typename SmemTileIterator::Fragment tb_frag_smem;
/// Load scale and bias from global memory
if(PerChannelScale)
scale_iterator.load(tb_frag_scale);
bias_iterator.load(tb_frag_bias);
/// Iterate over the accumulator tile and store to shared memory
CUTLASS_PRAGMA_UNROLL
for (int rid = 0; rid < AccumulatorFragmentIterator::TileIterations::kRow; ++rid) {
CUTLASS_PRAGMA_UNROLL
for (int cid = 0; cid < AccumulatorFragmentIterator::TileIterations::kColumn; ++cid) {
using AccumulatorAccessType = typename OutputOp::FragmentAccumulator;
using ScaleBiasAccessType = typename OutputOp::FragmentScaleBias;
using FragmentSmemAccessType = typename OutputOp::FragmentOutput;
ScaleBiasAccessType const * scale_frag_ptr =
reinterpret_cast<ScaleBiasAccessType const *>(&tb_frag_scale);
ScaleBiasAccessType const * bias_frag_ptr =
reinterpret_cast<ScaleBiasAccessType const *>(&tb_frag_bias);
FragmentSmemAccessType * smem_frag_ptr =
reinterpret_cast<FragmentSmemAccessType *>(&tb_frag_smem);
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < AccumulatorFragmentIterator::kIterationsPerTile; ++idx) {
frag_iterator_accum.load(tb_frag_accum);
++frag_iterator_accum;
AccumulatorAccessType const * accumulator_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&tb_frag_accum);
const int kOutputIterations = FragmentAccumulator::kElements / OutputOp::kCount;
CUTLASS_PRAGMA_UNROLL
for (int it = 0; it < kOutputIterations; it++) {
smem_frag_ptr[idx * kOutputIterations + it] = output_op(accumulator_frag_ptr[it],
scale_frag_ptr[cid * kOutputIterations + it], bias_frag_ptr[cid * kOutputIterations + it]);
}
}
smem_iterator.store(tb_frag_smem);
++smem_iterator;
}
}
}
/// Streams the result to shared memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
SmemTileIterator smem_iterator, ///< Tile iterator for destination in shared memory
AccumulatorTile const &accumulator) { ///< Complete warp-level accumulator tile
/// Fragment Iterator to load slice of accumulator tile
AccumulatorFragmentIterator frag_iterator_accum(accumulator);
FragmentAccumulator tb_frag_accum;
/// Epilogue output fragment
typename SmemTileIterator::Fragment tb_frag_smem;
/// Iterate over the accumulator tile and store to shared memory
CUTLASS_PRAGMA_UNROLL
for (int rid = 0; rid < AccumulatorFragmentIterator::TileIterations::kRow; ++rid) {
CUTLASS_PRAGMA_UNROLL
for (int cid = 0; cid < AccumulatorFragmentIterator::TileIterations::kColumn; ++cid) {
using AccumulatorAccessType = typename OutputOp::FragmentAccumulator;
using FragmentSmemAccessType = typename OutputOp::FragmentOutput;
FragmentSmemAccessType * smem_frag_ptr =
reinterpret_cast<FragmentSmemAccessType *>(&tb_frag_smem);
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < AccumulatorFragmentIterator::kIterationsPerTile; ++idx) {
frag_iterator_accum.load(tb_frag_accum);
++frag_iterator_accum;
AccumulatorAccessType const * accumulator_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&tb_frag_accum);
const int kOutputIterations = FragmentAccumulator::kElements / OutputOp::kCount;
CUTLASS_PRAGMA_UNROLL
for (int it = 0; it < kOutputIterations; it++) {
smem_frag_ptr[idx * kOutputIterations + it] = output_op(accumulator_frag_ptr[it]);
}
}
smem_iterator.store(tb_frag_smem);
++smem_iterator;
}
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
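// Conceptual sketch (not the CUTLASS implementation above): the staging step reduced to scalar
// C++. Per output column, the accumulator is combined with per-column scale/bias vectors loaded
// once per tile, and the result is written into a buffer that stands in for shared memory.
// All names and shapes below are illustrative assumptions.
#include <vector>

inline void stage_scaled_accumulator(
    std::vector<float> const& accum,   // tile_m x tile_n accumulator, row-major
    std::vector<float> const& scale,   // per-column scale (size tile_n)
    std::vector<float> const& bias,    // per-column bias  (size tile_n)
    std::vector<float>& smem_stage,    // staging buffer, same shape as accum
    int tile_m, int tile_n) {
  for (int m = 0; m < tile_m; ++m) {
    for (int n = 0; n < tile_n; ++n) {
      int idx = m * tile_n + n;
      // Mirrors output_op(accumulator, scale, bias) applied fragment by fragment.
      smem_stage[idx] = scale[n] * accum[idx] + bias[n];
    }
  }
}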
// ---- end of file: include/cutlass/epilogue/threadblock/epilogue_smem_accumulator.h ----
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Metaprogram for determining the mapping of output elements to threads for epilogue tiles.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/fast_math.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tuple defining point in output tile
template <
int Column,
int Row,
int Group,
int Cluster,
int Tile
>
struct OutputTileShape {
static int const kColumn = Column;
static int const kRow = Row;
static int const kGroup = Group;
static int const kCluster = Cluster;
static int const kTile = Tile;
static int const kCount = kColumn * kRow * kGroup * kCluster * kTile;
};
////////////////////////////////////////////////////////////////////////////////
template <typename Iterations, typename Delta>
struct OutputTileThreadMapHelpers {
/// Determines the iteration index of a vector access according to the thread map
CUTLASS_HOST_DEVICE
static void iteration_index(
int &column_idx,
int &row_idx,
int &group_idx,
int &cluster_idx,
int &tile_idx,
int iter_idx) {
column_idx = iter_idx % Iterations::kColumn;
int residual = iter_idx / Iterations::kColumn;
row_idx = residual % Iterations::kRow;
residual = residual / Iterations::kRow;
group_idx = residual % Iterations::kGroup;
residual = residual / Iterations::kGroup;
cluster_idx = residual % Iterations::kCluster;
tile_idx = residual / Iterations::kCluster;
}
/// Computes the offset of a given vector access
CUTLASS_HOST_DEVICE
static MatrixCoord iteration_offset(int iter_idx) {
int column_idx;
int row_idx;
int group_idx;
int cluster_idx;
int tile_idx;
iteration_index(column_idx, row_idx, group_idx, cluster_idx, tile_idx, iter_idx);
return
MatrixCoord(
row_idx * Delta::kRow +
group_idx * Delta::kGroup +
cluster_idx * Delta::kCluster +
tile_idx * Delta::kTile,
column_idx * Delta::kColumn);
}
};
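/// Worked example (illustrative, not part of the original header): the mixed-radix decomposition
/// performed by iteration_index() above, evaluated at compile time for assumed iteration counts
/// Iterations = (kColumn=2, kRow=4, kGroup=2, kCluster=1).
namespace iteration_index_example {
  constexpr int kColumn = 2, kRow = 4, kGroup = 2, kCluster = 1;

  constexpr int column_of(int iter)  { return iter % kColumn; }
  constexpr int row_of(int iter)     { return (iter / kColumn) % kRow; }
  constexpr int group_of(int iter)   { return (iter / (kColumn * kRow)) % kGroup; }
  constexpr int cluster_of(int iter) { return (iter / (kColumn * kRow * kGroup)) % kCluster; }

  // Iteration 11 = 1 + 1*2 + 1*(2*4) lands at (col=1, row=1, group=1), matching the
  // successive mod/div steps in iteration_index().
  static_assert(column_of(11) == 1 && row_of(11) == 1 && group_of(11) == 1 && cluster_of(11) == 0, "");
  static_assert(column_of(0) == 0 && row_of(0) == 0 && group_of(0) == 0, "");
}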
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ThreadMap_,
typename Shape_,
typename Iterations_,
typename Delta_,
typename Count_
>
struct OutputTileThreadMap : public OutputTileThreadMapHelpers<Iterations_, Delta_> {
/// Conventional thread map (concept: ThreadMap)
using ThreadMap = ThreadMap_;
/// Number of threads participating in the operation
static int const kThreads = ThreadMap::kThreads;
/// Number of scalar elements per access
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
/// Shape of the tile
using Shape = Shape_;
/// Iterations performed by each thread
using Iterations = Iterations_;
/// Delta between accesses
using Delta = Delta_;
/// Number of iterator iterations
using Count = Count_;
/// Initial offset function
CUTLASS_HOST_DEVICE
static MatrixCoord initial_offset(int thread_idx) {
using Index = typename layout::PitchLinearCoord::Index;
layout::PitchLinearCoord coord = ThreadMap::initial_offset(thread_idx);
Index cluster = coord.strided() / (Shape::kGroup * Shape::kRow);
Index cluster_residual = coord.strided() % (Shape::kGroup * Shape::kRow);
Index group = cluster_residual / (Shape::kRow);
Index row = cluster_residual % (Shape::kRow);
return MatrixCoord{
row + group * Shape::kRow * Count::kRow
+ cluster * Shape::kGroup * Count::kGroup * Shape::kRow * Count::kRow,
coord.contiguous()
};
}
};
////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// RowArrangement determines how one or more warps cover a region of consecutive rows.
template <
typename Shape,
int WarpsRemaining,
int ElementsPerAccess,
int ElementSize,
bool Is2dTile
>
struct RowArrangement;
/// RowArrangement in which each warp's access is a 1D tiled arrangement.
template <
typename Shape,
int WarpsRemaining,
int ElementsPerAccess,
int ElementSize
>
struct RowArrangement<Shape, WarpsRemaining, ElementsPerAccess, ElementSize, false> {
static int const kWarpSize = 32;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kElementSize = ElementSize;
static int const kIterationsRow = 1;
static int const kDeltaRow = 1;
static int const kIterationsColumn = Shape::kColumn / kElementsPerAccess / kWarpSize;
static int const kDeltaColumn = kWarpSize * kElementsPerAccess;
static int const kAccessWidth = kWarpSize;
static int const kAccessRows = 1;
static int const kWarpPartitionsRow = 1;
static int const kWarpPartitionsColumn = WarpsRemaining;
};
/// RowArrangement in which each warp's access is a 2D tiled arrangement.
template <
typename Shape,
int WarpsRemaining,
int ElementsPerAccess,
int ElementSize
>
struct RowArrangement<Shape, WarpsRemaining, ElementsPerAccess, ElementSize, true> {
static int const kMemoryAccessSize = 256; // Preferred access size
static int const kWarpSize = 32;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kElementSize = ElementSize;
struct Detail {
static int const kShapeRow = Shape::kRow / WarpsRemaining;
static int const kShapeWidth = Shape::kColumn / kElementsPerAccess;
static int const kTargetMemoryAccessWidth =
kMemoryAccessSize / (kElementsPerAccess * kElementSize / 8);
static int const kTargetAccessRows = kWarpSize / kTargetMemoryAccessWidth;
};
static int const kAccessWidth =
(Detail::kTargetAccessRows > Detail::kShapeRow ?
kWarpSize / Detail::kShapeRow
: const_min(
Detail::kShapeWidth,
const_min(kWarpSize, kMemoryAccessSize / (kElementsPerAccess * kElementSize / 8))
));
static int const kAccessRows =
(Detail::kTargetAccessRows > Detail::kShapeRow ?
Detail::kShapeRow
: const_min(Shape::kRow, kWarpSize / kAccessWidth));
static int const kIterationsRow = Detail::kShapeRow / kAccessRows;
static int const kDeltaRow = kAccessRows;
static int const kIterationsColumn = Detail::kShapeWidth / kAccessWidth;
static int const kDeltaColumn = kAccessWidth * kElementsPerAccess;
static_assert( kAccessWidth * kElementsPerAccess <= Shape::kColumn, "Accessing too many elements per access");
static_assert( kIterationsColumn > 0, "Iteration Count Column must be > 0" );
static_assert( kIterationsRow > 0, "Iteration Count Row must be > 0" );
static int const kWarpPartitionsRow = 1;
static int const kWarpPartitionsColumn = 1;
};
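/// Worked example (illustrative, not part of the original header): the values the 2D
/// RowArrangement above produces for one plausible configuration. The tile and element
/// parameters are assumptions chosen only to make the arithmetic concrete; the expressions
/// simply mirror the formulas of the specialization.
namespace row_arrangement_example {
  constexpr int kWarpSize = 32;
  constexpr int kMemoryAccessSize = 256;
  constexpr int kShapeRow = 64;            // Shape::kRow
  constexpr int kShapeColumn = 128;        // Shape::kColumn
  constexpr int kWarpsRemaining = 4;
  constexpr int kElementsPerAccess = 8;
  constexpr int kElementSize = 16;         // bits, e.g. half_t

  constexpr int kShapeRowPerWarp = kShapeRow / kWarpsRemaining;                                    // 16
  constexpr int kShapeWidth = kShapeColumn / kElementsPerAccess;                                   // 16
  constexpr int kTargetMemoryAccessWidth = kMemoryAccessSize / (kElementsPerAccess * kElementSize / 8); // 16
  constexpr int kTargetAccessRows = kWarpSize / kTargetMemoryAccessWidth;                          // 2

  constexpr int kClampedAccessWidth =
      (kTargetMemoryAccessWidth < kWarpSize) ? kTargetMemoryAccessWidth : kWarpSize;               // 16
  constexpr int kAccessWidth =
      (kTargetAccessRows > kShapeRowPerWarp)
      ? kWarpSize / kShapeRowPerWarp
      : ((kShapeWidth < kClampedAccessWidth) ? kShapeWidth : kClampedAccessWidth);                 // 16
  constexpr int kAccessRows =
      (kTargetAccessRows > kShapeRowPerWarp)
      ? kShapeRowPerWarp
      : ((kShapeRow < kWarpSize / kAccessWidth) ? kShapeRow : kWarpSize / kAccessWidth);           // 2

  static_assert(kShapeRowPerWarp / kAccessRows == 8, "kIterationsRow == 8 for this configuration");
  static_assert(kShapeWidth / kAccessWidth == 1, "kIterationsColumn == 1 for this configuration");
  static_assert(kAccessWidth * kElementsPerAccess == 128, "kDeltaColumn == 128 elements");
}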
}
////////////////////////////////////////////////////////////////////////////////
/// Template metaprogram for partitioning a 4D space across warps to achieve several performance
/// objectives:
///
/// - coalesced memory accesses in units of 128 Byte lines
/// - minimal address arithmetic
/// - minimal predicate calculations
///
template <
typename Shape_,
typename Count_,
int Threads,
int ElementsPerAccess,
int ElementSize
>
struct OutputTileOptimalThreadMap {
using Shape = Shape_;
using Count = Count_;
static int const kWarpSize = 32;
static int const kThreads = Threads;
static int const kWarpCount = kThreads / kWarpSize;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kElementSize = ElementSize;
//
// Metaprogram computation
//
struct Detail {
// Clusters
static int const kIterationsCluster =
((Shape::kCluster > kWarpCount) ?
Shape::kCluster / kWarpCount
: 1);
static int const kDeltaCluster =
((Shape::kCluster > kWarpCount) ?
Shape::kRow * Count::kRow * Shape::kGroup * Count::kGroup * Shape::kCluster / kIterationsCluster
: 1);
static int const kCompactedDeltaCluster =
((Shape::kCluster > kWarpCount) ?
Shape::kRow * Shape::kGroup * Shape::kCluster / kIterationsCluster
: 1);
static int const kWarpPartitionsCluster =
((Shape::kCluster > kWarpCount) ?
kWarpCount
: kWarpCount / Shape::kCluster);
static int const kWarpsRemainingForGroups =
((Shape::kCluster > kWarpCount) ? 1 : kWarpCount / Shape::kCluster);
// Groups
static int const kIterationsGroup =
((Shape::kGroup > kWarpsRemainingForGroups) ?
Shape::kGroup / kWarpsRemainingForGroups
: 1);
static int const kDeltaGroup =
((Shape::kGroup > kWarpsRemainingForGroups) ?
Shape::kRow * Count::kRow * Shape::kGroup / kIterationsGroup
: 1);
static int const kCompactedDeltaGroup =
((Shape::kGroup > kWarpsRemainingForGroups) ?
Shape::kRow * Shape::kGroup / kIterationsGroup
: 1);
static int const kWarpPartitionsGroup =
((Shape::kGroup > kWarpsRemainingForGroups) ?
1
: kWarpsRemainingForGroups / Shape::kGroup);
static int const kWarpsRemainingForRows =
((Shape::kGroup > kWarpsRemainingForGroups) ?
1
: kWarpsRemainingForGroups / Shape::kGroup);
// Rows
using RowArrangement = detail::RowArrangement<
Shape,
kWarpsRemainingForRows,
kElementsPerAccess,
kElementSize,
(Shape::kRow > kWarpsRemainingForRows)
>;
// Warp partitions
using WarpPartitions = OutputTileShape<
RowArrangement::kWarpPartitionsColumn,
RowArrangement::kWarpPartitionsRow,
kWarpPartitionsGroup,
kWarpPartitionsCluster,
1>;
static int const kAccessWidth = RowArrangement::kAccessWidth;
static int const kAccessRows = RowArrangement::kAccessRows;
};
//
// Output
//
using Iterations = OutputTileShape<
Detail::RowArrangement::kIterationsColumn,
Detail::RowArrangement::kIterationsRow,
Detail::kIterationsGroup,
Detail::kIterationsCluster,
1>;
using Delta = OutputTileShape<
Detail::RowArrangement::kDeltaColumn,
Detail::RowArrangement::kDeltaRow,
Detail::kDeltaGroup,
Detail::kDeltaCluster,
1>;
/// Initial offset function
CUTLASS_DEVICE
static MatrixCoord initial_offset(int thread_idx) {
// int warp_idx = __shfl_sync(0xffffffff, thread_idx / kWarpSize, 0);
int warp_idx = thread_idx / kWarpSize;
int lane_idx = thread_idx % kWarpSize;
// Compute warp location
int cluster_idx = warp_idx / Detail::WarpPartitions::kCluster;
int residual_cluster = warp_idx % Detail::WarpPartitions::kCluster;
int group_idx = residual_cluster / Detail::WarpPartitions::kGroup;
int residual_group = residual_cluster % Detail::WarpPartitions::kGroup;
int row_idx = residual_group / Detail::WarpPartitions::kRow;
int col_idx = residual_group % Detail::WarpPartitions::kRow;
// Compute per-lane offset
int lane_row_offset = lane_idx / Detail::kAccessWidth;
int lane_col_offset = lane_idx % Detail::kAccessWidth;
// Compute coordinate in output space
int cluster_offset = cluster_idx * Shape::kRow * Count::kRow * Shape::kGroup * Count::kGroup;
int group_offset = group_idx * Shape::kRow * Count::kRow;
int row_offset = row_idx * Iterations::kRow * Detail::kAccessRows;
int column_offset = col_idx * Iterations::kColumn * Detail::kAccessWidth * kElementsPerAccess;
return MatrixCoord(
cluster_offset + group_offset + row_offset + lane_row_offset,
column_offset + lane_col_offset * kElementsPerAccess
);
}
/// Computes the offset of a given vector access
CUTLASS_HOST_DEVICE
static MatrixCoord iteration_offset(int iter_idx) {
return OutputTileThreadMapHelpers<Iterations, Delta>::iteration_offset(iter_idx);
}
/// Compacted thread map in which the 4D region is contiguous
struct CompactedThreadMap {
using Shape = Shape_;
using TileShape = MatrixShape<
Shape::kTile * Shape::kCluster * Shape::kGroup * Shape::kRow,
Shape::kColumn
>;
using Iterations = OutputTileShape<
Detail::RowArrangement::kIterationsColumn,
Detail::RowArrangement::kIterationsRow,
Detail::kIterationsGroup,
Detail::kIterationsCluster,
1>;
using Delta = OutputTileShape<
Detail::RowArrangement::kDeltaColumn,
Detail::RowArrangement::kDeltaRow,
Detail::kCompactedDeltaGroup,
Detail::kCompactedDeltaCluster,
1>;
/// Number of elements within each vector access
static int const kElementsPerAccess = ElementsPerAccess;
/// Number of threads
static int const kThreads = Threads;
/// Function to compute each thread's initial offset
CUTLASS_DEVICE
static MatrixCoord initial_offset(int thread_idx) {
// int warp_idx = __shfl_sync(0xffffffff, thread_idx / kWarpSize, 0);
int warp_idx = thread_idx / kWarpSize;
int lane_idx = thread_idx % kWarpSize;
// Compute warp location
int cluster_idx = warp_idx / Detail::WarpPartitions::kCluster;
int residual_cluster = warp_idx % Detail::WarpPartitions::kCluster;
int group_idx = residual_cluster / Detail::WarpPartitions::kGroup;
int residual_group = residual_cluster % Detail::WarpPartitions::kGroup;
int row_idx = residual_group / Detail::WarpPartitions::kRow;
int col_idx = residual_group % Detail::WarpPartitions::kRow;
// Compute per-lane offset
int lane_row_offset = lane_idx / Detail::kAccessWidth;
int lane_col_offset = lane_idx % Detail::kAccessWidth;
// Compute coordinate in output space
int cluster_offset = cluster_idx * Shape::kRow * Shape::kGroup;
int group_offset = group_idx * Shape::kRow;
int row_offset = row_idx * Iterations::kRow * Detail::kAccessRows;
int column_offset = col_idx * Iterations::kColumn * Detail::kAccessWidth * kElementsPerAccess;
MatrixCoord coord(
cluster_offset + group_offset + row_offset + lane_row_offset,
column_offset + lane_col_offset * kElementsPerAccess
);
return coord;
}
};
};
////////////////////////////////////////////////////////////////////////////////
/// Template metaprogram for partitioning a 3D interleaved layout across warps
/// to achieve several performance objectives:
///
/// - coalesced memory accesses in units of 64 Byte lines
/// - minimal address arithmetic
/// - minimal predicate calculations
///
template <typename WarpCount_, typename Iterations_, int Threads,
int ElementsPerAccess, int ElementSize>
struct InterleavedOutputTileThreadMap {
using WarpCount = WarpCount_;
static int const kWarpSize = 32;
static int const kThreads = Threads;
static int const kWarpCount = kThreads / kWarpSize;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kElementSize = ElementSize;
//
// Metaprogram computation
//
struct Detail {};
//
// Output
//
using Iterations = Iterations_;
using Delta = layout::PitchLinearShape<kWarpSize * kElementsPerAccess, 1>;
/// Initial offset function
CUTLASS_HOST_DEVICE
static layout::PitchLinearCoord initial_offset(int thread_idx) {
int warp_idx = thread_idx / kWarpSize;
int lane_idx = thread_idx % kWarpSize;
// Compute warp location
layout::PitchLinearCoord warp_footprint{
Delta::kContiguous * Iterations::kContiguous,
Delta::kStrided * Iterations::kStrided};
layout::PitchLinearCoord warp_offset{warp_idx % WarpCount::kContiguous,
warp_idx / WarpCount::kContiguous};
// Compute per-lane offset
layout::PitchLinearCoord thread_offset_in_warp{
lane_idx * kElementsPerAccess, 0};
layout::PitchLinearCoord thread_offset_in_threadblock_tile =
warp_footprint * warp_offset + thread_offset_in_warp;
return thread_offset_in_threadblock_tile;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Template metaprogram for partitioning a 4D interleaved layout across warps
/// to achieve several performance objectives:
///
/// - coalesced memory accesses in units of 64 Byte lines
/// - minimal address arithmetic
/// - minimal predicate calculations
///
template <typename WarpCount_, typename Iterations_, int Threads,
int ElementsPerAccess, int ElementSize>
struct InterleavedConvOutputTileThreadMap {
using WarpCount = WarpCount_;
static int const kWarpSize = 32;
static int const kThreads = Threads;
static int const kWarpCount = kThreads / kWarpSize;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kElementSize = ElementSize;
//
// Metaprogram computation
//
struct Detail {};
//
// Output
//
using Iterations = Iterations_;
using Delta = MatrixShape<kWarpSize / 4, 4 * kElementsPerAccess>;
/// Initial offset function
CUTLASS_HOST_DEVICE
static MatrixCoord initial_offset(int thread_idx) {
int warp_idx = thread_idx / kWarpSize;
int lane_idx = thread_idx % kWarpSize;
// Compute warp location
MatrixCoord warp_footprint{
Delta::kRow * Iterations::kRow,
Delta::kColumn * Iterations::kColumn,
};
MatrixCoord warp_offset{warp_idx % WarpCount::kRow,
warp_idx / WarpCount::kRow};
// Compute per-lane offset
MatrixCoord thread_offset_in_warp{lane_idx / 4,
(lane_idx % 4) * kElementsPerAccess};
MatrixCoord thread_offset_in_threadblock_tile =
warp_footprint * warp_offset + thread_offset_in_warp;
return thread_offset_in_threadblock_tile;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
// ---- end of file: include/cutlass/epilogue/threadblock/output_tile_thread_map.h ----
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This defines a "fragment" iterator for visiting the fragments of an accumulator tile
that participate in one warp-level store operation.
Typically, the accumulator tile is the largest single block of register-backed storage
within the kernel. Storing it to memory is best accomplished by partitioning it into
smaller tiles and storing these sequentially.
Round trips through shared memory during the Epilogue phase require partitioning, as
shared memory capacity is typically insufficient for a threadblock's total accumulator
size.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/warp/tensor_op_policy.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
///
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename OperatorElementC, ///< matrix multiply operation data type (concept: data type)
typename OperatorFragmentC, ///< matrix multiply operation fragment (concept: Array)
typename Layout ///< target shared memory layout
>
class FragmentIteratorTensorOp;
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for row-major shared memory
template <
typename WarpShape_, ///< shape of the warp-level GEMM tile
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename OperatorElementC_, ///< matrix multiply operation data type (concept: data type)
typename OperatorFragmentC_ ///< matrix multiply operation fragment (concept: Array)
>
class FragmentIteratorTensorOp<WarpShape_, OperatorShape_, OperatorElementC_, OperatorFragmentC_, layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using OperatorElementC = OperatorElementC_;
using OperatorFragmentC = OperatorFragmentC_;
using Layout = layout::RowMajor;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
OperatorElementC,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<
OperatorElementC,
OperatorFragmentC::kElements * Policy::OperatorCount::kRow * Policy::OperatorCount::kColumn>;
using OutputAccumulatorTile = AccumulatorTile;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
using TileIterations = typename Policy::TileIterations;
static int const kIterationsPerTile = kIterations / TileIterations::kCount;
private:
/// Internal access type
using AccessType = Array<OperatorElementC, Policy::kElementsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
FragmentIteratorTensorOp(AccumulatorTile const &accum):
accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0) {
}
/// Increments
CUTLASS_HOST_DEVICE
FragmentIteratorTensorOp &operator++() {
++index_;
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
FragmentIteratorTensorOp &operator--() {
--index_;
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, int index_offset = 0) const {
int index = index_ + index_offset;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int accumulator_access_offset =
index + n * Policy::kAccumulatorColumnStride / Policy::kElementsPerAccess;
frag_ptr[n] = accumulators_[accumulator_access_offset];
}
}
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for col-major shared memory
/// Only works for m16n8k* (168x) Tensor Core MMA shapes
template <
typename WarpShape_, ///< shape of the warp-level GEMM tile
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename OperatorElementC_, ///< matrix multiply operation data type (concept: data type)
typename OperatorFragmentC_ ///< matrix multiply operation fragment (concept: Array)
>
class FragmentIteratorTensorOp<WarpShape_, OperatorShape_, OperatorElementC_, OperatorFragmentC_, layout::ColumnMajor> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using OperatorElementC = OperatorElementC_;
using OperatorFragmentC = OperatorFragmentC_;
using Layout = layout::ColumnMajor;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
OperatorElementC,
4 * Policy::OperatorCount::kRow * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<
OperatorElementC,
OperatorFragmentC::kElements * Policy::OperatorCount::kRow * Policy::OperatorCount::kColumn>;
using OutputAccumulatorTile = AccumulatorTile;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
using TileIterations = typename Policy::TileIterations;
static int const kIterationsPerTile = kIterations / TileIterations::kCount;
private:
/// Internal access type
using AccessType = Array<OperatorElementC, Policy::kElementsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
FragmentIteratorTensorOp(AccumulatorTile const &accum):
accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0) {
}
/// Increments
CUTLASS_HOST_DEVICE
FragmentIteratorTensorOp &operator++() {
++index_;
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
FragmentIteratorTensorOp &operator--() {
--index_;
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, int index_offset = 0) const {
int index = index_ + index_offset;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Policy::kAccumulatorRowStride; ++i) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < (Policy::OperatorCount::kRow * 2); ++m) {
int accumulator_access_offset =
index * Policy::kAccumulatorColumnStride + m * Policy::kAccumulatorRowStride / Policy::kElementsPerAccess + i;
frag_ptr[m + i * Policy::OperatorCount::kRow * 2] = accumulators_[accumulator_access_offset];
}
}
}
};
////////////////////////////////////////////////////////////////////////////////
/// Dedicated to interleaved layout
template <
/// shape of the warp-level GEMM tile
typename WarpShape_,
/// matrix multiply operator shape (concept: gemm::GemmShape)
typename OperatorShape_,
/// matrix multiply operator data type (concept: data type)
typename OperatorElementC_,
/// matrix multiply operator fragment (concept: Array)
typename OperatorFragmentC_,
/// number of interleaved k
int InterleavedK>
class FragmentIteratorTensorOp<WarpShape_, OperatorShape_, OperatorElementC_, OperatorFragmentC_,
layout::ColumnMajorInterleaved<InterleavedK>> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using OperatorElementC = OperatorElementC_;
using OperatorFragmentC = OperatorFragmentC_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::ColumnMajorInterleaved<kInterleavedK>;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// This is the fragment size produced by one access of the iterator.
using Fragment =
Array<OperatorElementC,
Policy::kElementsPerAccess * InterleavedK / OperatorShape::kN>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile =
Array<OperatorElementC, OperatorFragmentC::kElements *
Policy::OperatorCount::kRow *
Policy::OperatorCount::kColumn>;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
using TileIterations = typename Policy::TileIterations;
static int const kIterationsPerTile = kIterations / TileIterations::kCount;
private:
/// Internal access type
using AccessType =
Array<OperatorElementC, Policy::kElementsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
FragmentIteratorTensorOp(AccumulatorTile const &accum)
: accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0) {}
/// Increments
CUTLASS_HOST_DEVICE
FragmentIteratorTensorOp &operator++() {
++index_;
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
FragmentIteratorTensorOp &operator--() {
--index_;
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, int index_offset = 0) const {
int index = index_ + index_offset;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < (InterleavedK / OperatorShape::kN); ++n) {
int index_m = index % (Policy::OperatorCount::kRow *
Policy::kIterationsPerInstruction);
int index_n = index / (Policy::OperatorCount::kRow *
Policy::kIterationsPerInstruction);
int accumulator_access_offset =
(index_m / Policy::kIterationsPerInstruction) *
(Policy::OperatorCount::kColumn *
Policy::kIterationsPerInstruction) +
(index_m % Policy::kIterationsPerInstruction) +
index_n * (InterleavedK / OperatorShape::kN) *
Policy::kIterationsPerInstruction +
n * Policy::kIterationsPerInstruction;
frag_ptr[n] = accumulators_[accumulator_access_offset];
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
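// Illustrative usage sketch (not part of the original header). The warp and instruction shapes
// below are assumptions chosen to form a plausible Tensor Core accumulator tile; the iterator's
// own typedefs provide the fragment and tile sizes, so the visiting loop is the point of interest.
#include "cutlass/gemm/gemm.h"

inline void visit_accumulator_fragments_example() {
  using WarpShape     = cutlass::gemm::GemmShape<64, 64, 16>;   // assumed warp-level tile
  using OperatorShape = cutlass::gemm::GemmShape<16, 8, 16>;    // assumed MMA instruction shape
  using FragmentIterator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
      WarpShape, OperatorShape, float, cutlass::Array<float, 4>, cutlass::layout::RowMajor>;

  FragmentIterator::AccumulatorTile accum;
  accum.clear();

  FragmentIterator frag_iterator(accum);
  FragmentIterator::Fragment frag;

  // Visit the accumulator one store-sized fragment at a time, as the threadblock epilogue does.
  for (int iter = 0; iter < FragmentIterator::kIterations; ++iter, ++frag_iterator) {
    frag_iterator.load(frag);
    // ... hand "frag" to a warp tile iterator / output functor here ...
  }
}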
// ---- end of file: include/cutlass/epilogue/warp/fragment_iterator_tensor_op.h ----
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/arch/mma.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/detail/layout.hpp"
#include "cutlass/detail/collective.hpp"
#include "cutlass/detail/dependent_false.hpp"
#include "cute/atom/mma_traits_sm90_gmma.hpp"
#include "cute/atom/copy_traits_sm90_tma.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::collective {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
//
// Some named constants
//
constexpr int tma_alignment_bytes = 16;
constexpr int cp_async_min_alignment_bytes = 4;
constexpr int sm90_smem_capacity_bytes = 232448;
// Maps 2.x A matrix layout tag to respective GMMA major mode enum
template <class ElementA, class LayoutA>
constexpr cute::GMMA::Major
gmma_ss_tag_to_major_A() {
// MN major mode is only valid for non-TF32, non-int and non-fp8 MMAs
if constexpr (cutlass::gemm::detail::is_mn_major_A<LayoutA>() &&
not cute::is_same_v<ElementA, tfloat32_t> &&
sizeof(ElementA) != 1) {
return cute::GMMA::Major::MN;
}
else {
return cute::GMMA::Major::K;
}
}
// Maps 2.x B matrix layout tag to respective GMMA major mode enum
template <class ElementB, class LayoutB>
constexpr cute::GMMA::Major
gmma_ss_tag_to_major_B() {
// MN major mode is only valid for non-TF32, non-int and non-fp8 MMAs
if constexpr (cutlass::gemm::detail::is_mn_major_B<LayoutB>() &&
not cute::is_same_v<ElementB, tfloat32_t> &&
sizeof(ElementB) != 1) {
return cute::GMMA::Major::MN;
}
else {
return cute::GMMA::Major::K;
}
}
template <class LayoutA>
constexpr cute::GMMA::Major
gmma_rs_tag_to_major_A() {
// MN major mode is only valid for non-TF32 and non-int MMAs
if constexpr (cutlass::gemm::detail::is_mn_major_A<LayoutA>()) {
return cute::GMMA::Major::MN;
}
else {
return cute::GMMA::Major::K;
}
}
template <class LayoutB>
constexpr cute::GMMA::Major
gmma_rs_tag_to_major_B() {
// MN major mode is only valid for non-TF32 and non-int MMAs
if constexpr (cutlass::gemm::detail::is_mn_major_B<LayoutB>()) {
return cute::GMMA::Major::MN;
}
else {
return cute::GMMA::Major::K;
}
}
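// Small compile-time sketch (illustrative, not part of the original header; it relies only on
// types already pulled in by the includes above). It records how the tag-to-major helpers resolve
// for the common 2.x layout tags; half_t is used so the non-TF32 / non-8-bit branch applies.
static_assert(gmma_ss_tag_to_major_A<half_t, cutlass::layout::RowMajor>() == cute::GMMA::Major::K,
              "Row-major A is K-contiguous, so GMMA treats it as K-major");
static_assert(gmma_ss_tag_to_major_A<half_t, cutlass::layout::ColumnMajor>() == cute::GMMA::Major::MN,
              "Column-major A is M-contiguous, so GMMA treats it as MN-major");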
// Maps a rank-1 cute::Shape<> representing the cluster shape onto the TMA atom that should be used with it
template <class UnimodalClusterShape>
constexpr auto
sm90_cluster_shape_to_tma_atom(UnimodalClusterShape) {
static_assert(cute::rank(UnimodalClusterShape{}) == 1,
"Use this function to figure out TMA for each mode individually.");
if constexpr (cute::size(UnimodalClusterShape{}) == 1) {
return cute::SM90_TMA_LOAD{};
}
else {
return cute::SM90_TMA_LOAD_MULTICAST{};
}
}
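// Compile-time sketch (illustrative, not part of the original header): the atom selected for a
// 1-wide versus a 2-wide cluster mode. A cute::Int<> constant stands in for a single mode of a
// rank-3 cluster shape such as Shape<_2,_1,_1>.
static_assert(cute::is_same_v<decltype(sm90_cluster_shape_to_tma_atom(cute::Int<1>{})),
                              cute::SM90_TMA_LOAD>,
              "A single CTA along this mode needs no multicast");
static_assert(cute::is_same_v<decltype(sm90_cluster_shape_to_tma_atom(cute::Int<2>{})),
                              cute::SM90_TMA_LOAD_MULTICAST>,
              "Multiple CTAs along this mode share the load via multicast TMA");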
// Generates the most efficient possible TiledCopy with cp.async copy atom given a set of parameters.
template<int ThreadCount, class Element, int Alignment, class StrideType, class TileMN, class TileK>
constexpr auto
make_cp_async_gmem_tiled_copy() {
using namespace cute;
using AlignmentType = cute::uint_byte_t<static_cast<int>(sizeof(Element)) * Alignment>;
constexpr int TileSizeMN = cute::size(TileMN{});
constexpr int TileSizeK = cute::size(TileK{});
  // Maximize the number of threads along the gmem major mode to promote coalesced reads,
  // while making sure our thread layout tiles the threadblock tile evenly
if constexpr (cutlass::gemm::detail::is_k_major<StrideType>()) {
// K major thread layout for K major gmem
constexpr int threads_major = (ThreadCount >= TileSizeK / Alignment) ? (TileSizeK / Alignment) : ThreadCount;
constexpr int threads_minor = ThreadCount / threads_major;
static_assert(threads_major > 0);
static_assert(ThreadCount % threads_major == 0);
static_assert(threads_minor == 0 || (TileSizeMN % threads_minor == 0));
return make_tiled_copy(
Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<AlignmentType>, Element>{},
Layout<Shape <Int<threads_minor>,Int<threads_major>>,
Stride<Int<threads_major>, _1>>{},
Layout<Shape<_1,Int<Alignment>>>{});
}
else if constexpr (cutlass::gemm::detail::is_mn_major<StrideType>()) {
// MN major thread layout for MN major gmem
constexpr int threads_major = (ThreadCount >= TileSizeMN / Alignment) ? (TileSizeMN / Alignment) : ThreadCount;
constexpr int threads_minor = ThreadCount / threads_major;
static_assert(threads_major > 0);
static_assert(ThreadCount % threads_major == 0);
static_assert(threads_minor == 0 || (TileSizeK % threads_minor == 0));
return make_tiled_copy(
Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<AlignmentType>, Element>{},
Layout<Shape <Int<threads_major>,Int<threads_minor>>,
Stride< _1,Int<threads_major>>>{},
Layout<Shape<Int<Alignment>,_1>>{});
}
else {
static_assert(cute::is_void_v<Element>, "Unsupported gmem layout for automatic gmem tiled copy builder.");
}
}
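// Worked example (illustrative, not part of the original header): the thread-layout arithmetic
// performed above for a K-major operand with 128 threads, a K tile of 64 elements, and 8-element
// (16B) vector accesses. The numbers are assumptions chosen only to make the shapes concrete.
namespace cp_async_layout_example {
  constexpr int ThreadCount = 128;
  constexpr int TileSizeK   = 64;
  constexpr int Alignment   = 8;
  // Threads along the contiguous (K) mode, capped by how many 8-element vectors fit in K:
  constexpr int threads_major = (ThreadCount >= TileSizeK / Alignment) ? (TileSizeK / Alignment)
                                                                       : ThreadCount;   // 8
  constexpr int threads_minor = ThreadCount / threads_major;                            // 16
  static_assert(threads_major == 8 && threads_minor == 16,
                "128 threads cover the tile as a 16x8 thread grid of 1x8-element vectors");
}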
// Helper for RS GMMA smem selection that considers a tensor TileShape:
// (BLK_MN, BLK_K)
// or hierarchically
// ((BLK_MN0,BLK_MN1,...),(BLK_K0,BLK_K1,...))
// and returns the optimal GMMA::Layout that fits BLK_MN0 and BLK_K0
template <cute::GMMA::Major major, class ElementType, class BLK_MN, class BLK_K, const bool is_ws_transposed_B = false>
constexpr auto
rs_smem_selector() {
using namespace cute;
auto BLK_MN0 = size<0>(BLK_MN{});
auto BLK_K0 = size<0>(BLK_K{});
static_assert(BLK_MN0 % 8 == 0, "BLK_MN0 must be a multiple of 8.");
static_assert(BLK_K0 % 8 == 0, "BLK_K0 must be a multiple of 8.");
if constexpr (major == GMMA::Major::MN) {
if constexpr (sizeof(ElementType) == 4){
if constexpr (is_ws_transposed_B) {
        // Only the optimized transposition of B (SW32 and SW128 for tf32) can be used; prefer SW32 since it is bank-conflict free
if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{}) == 0) {
return GMMA::Layout_MN_SW32_Atom<ElementType>{};
}
else {
static_assert(BLK_MN0 % size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{}) == 0,
"BLK_MN0 must be a multiple of size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{})");
}
}
else {
        // Fall back to SW32 since it is bank-conflict free
if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{}) == 0) {
return GMMA::Layout_MN_SW32_Atom<ElementType>{};
}
else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0) {
return GMMA::Layout_MN_INTER_Atom<ElementType>{};
}
else {
static_assert(BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0,
"BLK_MN0 must be a multiple of size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{})");
}
}
}
// Used for int8, fp8, fp16 and bf16 I/O kernels
else if constexpr (sizeof(ElementType) == 1 || sizeof(ElementType) == 2) {
if constexpr (sizeof(ElementType) == 1 && is_ws_transposed_B) {
        // Only the optimized transposition of B (SW128 for int8 and fp8) can be used
if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW128_Atom<ElementType>{}) == 0) {
return GMMA::Layout_MN_SW128_Atom<ElementType>{};
}
else {
static_assert(BLK_MN0 % size<0>(GMMA::Layout_MN_SW128_Atom<ElementType>{}) == 0,
"BLK_MN0 must be a multiple of size<0>(GMMA::Layout_MN_128_Atom<ElementType>{})");
}
}
else {
if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW128_Atom<ElementType>{}) == 0) {
return GMMA::Layout_MN_SW128_Atom<ElementType>{};
}
else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW64_Atom<ElementType>{}) == 0) {
return GMMA::Layout_MN_SW64_Atom<ElementType>{};
}
else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{}) == 0) {
return GMMA::Layout_MN_SW32_Atom<ElementType>{};
}
else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0) {
return GMMA::Layout_MN_INTER_Atom<ElementType>{};
}
else {
static_assert(BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0,
"BLK_MN0 must be a multiple of size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{})");
}
}
}
else {
static_assert(cutlass::detail::dependent_false<ElementType>, "Smem selector does not support this element type");
}
}
else if constexpr (major == GMMA::Major::K) {
if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW128_Atom<ElementType>{}) == 0) {
return GMMA::Layout_K_SW128_Atom<ElementType>{};
}
else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW64_Atom<ElementType>{}) == 0) {
return GMMA::Layout_K_SW64_Atom<ElementType>{};
}
else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW32_Atom<ElementType>{}) == 0) {
return GMMA::Layout_K_SW32_Atom<ElementType>{};
}
else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{}) == 0) {
return GMMA::Layout_K_INTER_Atom<ElementType>{};
}
else {
static_assert(BLK_K0 % size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{}) == 0,
"BLK_K0 must be a multiple of size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{})");
}
}
}
// Helper for SS GMMA smem selection that considers a tensor TileShape:
// (BLK_MN, BLK_K)
// or hierarchically
// ((BLK_MN0,BLK_MN1,...),(BLK_K0,BLK_K1,...))
// and returns the largest GMMA::Layout that fits BLK_MN0 and BLK_K0
template <cute::GMMA::Major major, class ElementType, class BLK_MN, class BLK_K>
CUTE_HOST_DEVICE constexpr
auto
ss_smem_selector()
{
using namespace cute;
auto BLK_MN0 = size<0>(BLK_MN{});
auto BLK_K0 = size<0>(BLK_K{});
static_assert(BLK_MN0 % 8 == 0, "BLK_MN0 must be a multiple of 8.");
static_assert(BLK_K0 % 8 == 0, "BLK_K0 must be a multiple of 8.");
if constexpr (major == GMMA::Major::MN) {
if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW128_Atom<ElementType>{}) == 0) {
return GMMA::Layout_MN_SW128_Atom<ElementType>{};
}
else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW64_Atom<ElementType>{}) == 0) {
return GMMA::Layout_MN_SW64_Atom<ElementType>{};
}
else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{}) == 0) {
return GMMA::Layout_MN_SW32_Atom<ElementType>{};
}
else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0) {
return GMMA::Layout_MN_INTER_Atom<ElementType>{};
}
else {
static_assert(BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0,
"BLK_MN0 must be a multiple of size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{})");
}
}
else if constexpr (major == GMMA::Major::K) {
if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW128_Atom<ElementType>{}) == 0) {
return GMMA::Layout_K_SW128_Atom<ElementType>{};
}
else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW64_Atom<ElementType>{}) == 0) {
return GMMA::Layout_K_SW64_Atom<ElementType>{};
}
else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW32_Atom<ElementType>{}) == 0) {
return GMMA::Layout_K_SW32_Atom<ElementType>{};
}
else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{}) == 0) {
return GMMA::Layout_K_INTER_Atom<ElementType>{};
}
else {
static_assert(BLK_K0 % size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{}) == 0,
"BLK_K0 must be a multiple of size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{})");
}
}
}
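// Illustrative standalone sketch (not CUTLASS code; all names below are assumptions) of the
// selection rule implemented by rs_smem_selector and ss_smem_selector above: choose the widest
// swizzle atom whose contiguous extent evenly divides the tile's major-mode extent, and fall
// back to the interleaved (no-swizzle) atom otherwise.
//
//   #include <cstdio>
//
//   // Conceptual model only: atoms span 128/64/32 contiguous bytes; 0 denotes the INTER fallback.
//   constexpr int pick_swizzle_bytes(int tile_major_elems, int elem_bytes) {
//     int tile_bytes = tile_major_elems * elem_bytes;
//     if (tile_bytes % 128 == 0) { return 128; }  // SW128 atom
//     if (tile_bytes %  64 == 0) { return  64; }  // SW64 atom
//     if (tile_bytes %  32 == 0) { return  32; }  // SW32 atom
//     return 0;                                   // INTER (no swizzle)
//   }
//
//   int main() {
//     // A 128-element major extent of a 2-byte type spans 256 bytes, so SW128 would be selected.
//     std::printf("%d\n", pick_swizzle_bytes(128, 2));
//     return 0;
//   }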
template <class ElementA, class ElementB>
constexpr bool
is_input_size_two_bytes() {
return (sizeof(ElementA) == 2 && sizeof(ElementB) == 2);
}
template <class ElementA, class ElementB>
constexpr bool
is_input_fp8() {
return ((cute::is_same_v<ElementA, float_e4m3_t> || cute::is_same_v<ElementA, float_e5m2_t>) &&
(cute::is_same_v<ElementB, float_e4m3_t> || cute::is_same_v<ElementB, float_e5m2_t>));
}
// We need to handle the tuples in this function since it is used in SFINAE dispatch in the CollectiveBuilder.
// At that point, it is not guaranteed that the tuples have been split out into the required parts.
template <class MaybeTupleElementA, class LayoutA, class MaybeTupleElementB, class LayoutB>
constexpr bool
is_use_rmem_A() {
using ElementA = detail::deduce_mixed_width_dtype_t<0, MaybeTupleElementA>;
using ElementB = detail::deduce_mixed_width_dtype_t<0, MaybeTupleElementB>;
constexpr bool IsABDifferentWidth = cute::sizeof_bits_v<ElementA> != cute::sizeof_bits_v<ElementB>;
constexpr bool HasScales = cute::is_tuple<MaybeTupleElementA>::value ^ cute::is_tuple<MaybeTupleElementB>::value;
constexpr bool IsInputSizeTwoBytes = is_input_size_two_bytes<ElementA, ElementB>();
constexpr bool IsLayoutAkBk = cutlass::gemm::detail::is_k_major_A<LayoutA>() &&
cutlass::gemm::detail::is_k_major_B<LayoutB>();
constexpr bool IsUseRmemA = (!IsInputSizeTwoBytes && !IsLayoutAkBk) || IsABDifferentWidth || HasScales;
return IsUseRmemA;
}
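// Illustrative check (assumption-labeled, not from the source) re-deriving the boolean above for
// a plain half x half TN GEMM (both operands K-major, equal widths, no scale tuples): the SS
// (smem-sourced A) path is kept; an MN-major A, mixed operand widths, or a scale tuple on exactly
// one operand would flip the result to true and select the RS path.
//
//   constexpr bool two_byte_inputs = true;   // half_t x half_t
//   constexpr bool both_k_major    = true;   // A row-major, B column-major
//   constexpr bool diff_width      = false;
//   constexpr bool has_scales      = false;
//   static_assert(((!two_byte_inputs && !both_k_major) || diff_width || has_scales) == false,
//                 "half x half TN keeps A in smem (SS MMA)");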
template <class ElementA, int AlignmentA, class ElementB, int AlignmentB, int RequiredAlignment>
constexpr bool
is_aligned() {
return ((sizeof(ElementA) * AlignmentA) % RequiredAlignment == 0) &&
((sizeof(ElementB) * AlignmentB) % RequiredAlignment == 0);
}
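// Worked example (illustrative): for 2-byte elements such as half_t, an alignment of 8 elements
// yields 16-byte accesses, so a 16-byte (128-bit) requirement such as the one used by TMA-based
// builders is satisfied; an alignment of 4 elements (8 bytes) is not.
//
//   static_assert( (2 /*sizeof(half_t)*/ * 8) % 16 == 0, "8-element half vectors are 16B");
//   static_assert(((2 /*sizeof(half_t)*/ * 4) % 16) != 0, "4-element half vectors are only 8B");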
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::collective
| include/cutlass/gemm/collective/builders/sm90_common.inl/0 | {
"file_path": "include/cutlass/gemm/collective/builders/sm90_common.inl",
"repo_id": "include",
"token_count": 6202
} | 28 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This file contains definitions and utility functions for describing problem shapes
for 3.x Ptr-Array GEMMs and Grouped GEMMs.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_coord.h"
#include "cute/container/array.hpp"
#if ! defined(__CUDACC_RTC__)
#include <initializer_list>
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm {
////////////////////////////////////////////////////////////////////////////////////////////////////
template <class ProblemShape_>
struct GroupProblemShape {
using UnderlyingProblemShape = ProblemShape_;
int32_t num_groups = 1;
UnderlyingProblemShape* problem_shapes = nullptr;
UnderlyingProblemShape const* host_problem_shapes = nullptr;
CUTLASS_HOST_DEVICE
int32_t groups() const { return num_groups; }
CUTLASS_HOST_DEVICE
UnderlyingProblemShape const
get_problem_shape(int32_t group_idx) const {
return problem_shapes[group_idx];
}
CUTLASS_HOST_DEVICE
UnderlyingProblemShape const
get_host_problem_shape(int32_t group_idx) const {
return host_problem_shapes[group_idx];
}
CUTLASS_HOST_DEVICE
bool
is_host_problem_shape_available() {
return host_problem_shapes != nullptr;
}
};
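// Host-side sketch (illustrative only; the variable names and the device-copy step are
// assumptions, not CUTLASS API): problem_shapes must reference device-visible memory holding one
// (M,N,K) entry per group, while host_problem_shapes is optional and only enables host-side
// shape queries.
//
//   using ProblemShape = cute::Shape<int,int,int>;          // per-group (M,N,K)
//   std::vector<ProblemShape> shapes_host;
//   shapes_host.push_back(cute::make_shape(128, 256,  64));
//   shapes_host.push_back(cute::make_shape( 64,  64, 512));
//   ProblemShape* shapes_device = nullptr;  // fill with a device copy of shapes_host (cudaMalloc + cudaMemcpy)
//   cutlass::gemm::GroupProblemShape<ProblemShape> group_shapes{
//       static_cast<int32_t>(shapes_host.size()), shapes_device, shapes_host.data()};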
template <class ProblemShape_>
class ArrayProblemShape {
public:
using UnderlyingProblemShape = ProblemShape_;
ArrayProblemShape() = default;
ArrayProblemShape(UnderlyingProblemShape ps) : problem_shape_(ps) {}
// The number of groups for a Ptr-Array GEMM always remains one; only the number of batches (l) can vary
// This is just to maintain uniformity with GroupProblemShape
constexpr int32_t groups() const { return 1; }
UnderlyingProblemShape* problem_shapes() const {
return &problem_shape_;
}
UnderlyingProblemShape const* host_problem_shapes() const {
return &problem_shape_;
}
// This is just to maintain uniformity with GroupProblemShape
CUTLASS_HOST_DEVICE
UnderlyingProblemShape const
get_problem_shape(int32_t /* unused */ = 0) const {
return problem_shape_;
}
CUTLASS_HOST_DEVICE
UnderlyingProblemShape const
get_host_problem_shape(int32_t /* unused */ = 0) const {
return problem_shape_;
}
CUTLASS_HOST_DEVICE
bool
is_host_problem_shape_available() {
return true;
}
private:
UnderlyingProblemShape problem_shape_{};
};
} // namespace cutlass::gemm
| include/cutlass/gemm/group_array_problem_shape.hpp/0 | {
"file_path": "include/cutlass/gemm/group_array_problem_shape.hpp",
"repo_id": "include",
"token_count": 1233
} | 29 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default configuration for a GEMM with fused epilogue visitor callbacks
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/kernel/default_gemm_universal.h"
#include "cutlass/gemm/kernel/gemm_universal_with_visitor.h"
#include "cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h"
#include "cutlass/epilogue/threadblock/epilogue_with_visitor_callbacks.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Access granularity of C matrix in unit of elements
int kAlignmentC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Element type for epilogue computation
typename ElementEpilogue,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename FusionCallbacks,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Number of stages used in the pipelined epilogue
int EpilogueStages = 1
>
struct DefaultGemmWithVisitor {
using GemmBase = typename DefaultGemmUniversal<
ElementA_, LayoutA_, TransformA, kAlignmentA,
ElementB_, LayoutB_, TransformB, kAlignmentB,
ElementC_, LayoutC_, ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
epilogue::thread::LinearCombination<
ElementC_, kAlignmentC,
ElementAccumulator, ElementEpilogue
>,
ThreadblockSwizzle,
Stages,
Operator
>::GemmKernel;
// Define epilogue
using Epilogue = cutlass::epilogue::threadblock::EpilogueWithVisitorCallbacks<
typename GemmBase::Epilogue,
FusionCallbacks,
EpilogueStages
>;
/// GemmWithVisitor without StreamkFeature member type
template <class SwizzleT, class Enable = void>
class SelectBase :
public GemmWithEpilogueVisitor<
typename GemmBase::Mma,
Epilogue,
SwizzleT>
{};
/// GemmWithVisitor with StreamkFeature member type
template <class SwizzleT>
class SelectBase<SwizzleT, typename SwizzleT::StreamkFeature> :
public GemmWithEpilogueVisitorStreamk<
typename GemmBase::Mma,
Epilogue,
SwizzleT>
{};
/// Select kernel by ThreadblockSwizzle's support for StreamkFeature
using GemmKernel = SelectBase<ThreadblockSwizzle>;
};
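// Standalone sketch (illustrative, not CUTLASS code) of the member-type detection idiom used by
// SelectBase above: the partial specialization is viable only when SwizzleT::StreamkFeature names
// the type void, so stream-K-capable swizzles are routed to the stream-K kernel while all other
// swizzles fall back to the primary template.
//
//   struct PlainSwizzle   { };
//   struct StreamkSwizzle { using StreamkFeature = void; };
//
//   template <class SwizzleT, class Enable = void>
//   struct Select          { static constexpr bool kStreamK = false; };
//
//   template <class SwizzleT>
//   struct Select<SwizzleT, typename SwizzleT::StreamkFeature>
//                          { static constexpr bool kStreamK = true; };
//
//   static_assert(!Select<PlainSwizzle>::kStreamK,   "no StreamkFeature -> classic kernel");
//   static_assert( Select<StreamkSwizzle>::kStreamK, "StreamkFeature -> stream-K kernel");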
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/default_gemm_universal_with_visitor.h/0 | {
"file_path": "include/cutlass/gemm/kernel/default_gemm_universal_with_visitor.h",
"repo_id": "include",
"token_count": 1622
} | 30 |
/***************************************************************************************************
* Copyright (c) 2024 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/arch/arch.h"
#include "cutlass/fast_math.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
#include "cutlass/gemm/kernel/gemm_sparse_universal.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
class GemmSparseUniversalWithAbsmax {
public:
using Base = GemmSparseUniversal<Mma_, Epilogue_, ThreadblockSwizzle_>;
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static int const kSparse = Mma::kSparse;
static int const kMetaSizeInBits = Mma::kMetaSizeInBits;
static int const kMaxID2 = Mma::kMaxID2;
static int const kElementsPerElementE = Mma::kElementsPerElementE;
using ElementE = typename Mma::ElementE;
using LayoutE = typename Mma::LayoutE;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
using ElementAux = typename Epilogue::AuxOutputTileIterator::Element;
using LayoutAux = typename Epilogue::AuxOutputTileIterator::Layout;
using ElementVector = typename Epilogue::ElementVector;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value);
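// Worked example (illustrative): for half_t A and B (16-bit elements) this evaluates to
// const_max(128/16, 128/16) = 8 elements, i.e. split-K partition boundaries fall on 16-byte
// (128-bit) aligned offsets along K.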
//
// Structures
//
/// Argument structure
struct Arguments : detail::SparseUniversalArgumentsBase<
LayoutA,
LayoutB,
LayoutC,
LayoutE
> {
using Base = detail::SparseUniversalArgumentsBase<
LayoutA,
LayoutB,
LayoutC,
LayoutE
>;
void const* ptr_Aux;
void const* ptr_Vector;
int64_t batch_stride_Aux;
int64_t batch_stride_Vector;
typename LayoutAux::Stride::LongIndex ldaux;
int64_t ldvector;
typename EpilogueOutputOp::Params epilogue;
Arguments() {}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
void const * ptr_E,
void const * ptr_Aux,
void const * ptr_Vector,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
int64_t batch_stride_E,
int64_t batch_stride_Aux,
int64_t batch_stride_Vector,
typename LayoutA::Stride::LongIndex lda,
typename LayoutB::Stride::LongIndex ldb,
typename LayoutC::Stride::LongIndex ldc,
typename LayoutC::Stride::LongIndex ldd,
typename LayoutC::Stride::LongIndex lde,
typename LayoutAux::Stride::LongIndex ldaux,
int64_t ldvector
)
:
Base(
mode, problem_size, batch_count,
ptr_A, ptr_B, ptr_C, ptr_D, ptr_E,
batch_stride_A, batch_stride_B, batch_stride_C, batch_stride_D, batch_stride_E,
lda, ldb, ldc, ldd, lde
),
ptr_Aux(ptr_Aux),
ptr_Vector(ptr_Vector),
batch_stride_Aux(batch_stride_Aux),
batch_stride_Vector(batch_stride_Vector),
ldaux(ldaux),
ldvector(ldvector),
epilogue(epilogue)
{ }
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : detail::SparseUniversalParamsBase<
Mma,
Epilogue,
Arguments,
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>
{
using ParamsBase = detail::SparseUniversalParamsBase<
Mma,
Epilogue,
Arguments,
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>;
typename Epilogue::AuxOutputTileIterator::Params params_Aux;
int64_t ldvector;
void* ptr_Aux;
void* ptr_Vector;
int64_t batch_stride_Aux;
int64_t batch_stride_Vector;
typename EpilogueOutputOp::Params output_op;
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_Aux(args.ldaux),
ldvector(args.ldvector),
ptr_Aux(const_cast<void *>(args.ptr_Aux)),
ptr_Vector(const_cast<void *>(args.ptr_Vector)),
batch_stride_Aux(args.batch_stride_Aux),
batch_stride_Vector(args.batch_stride_Vector),
output_op(args.epilogue)
{}
/// Lightweight update given a subset of arguments.
void update(Arguments const &args)
{
CUTLASS_TRACE_HOST("GemmUniversal::Params::update()");
// Update input/output pointers
this->ptr_A = const_cast<void *>(args.ptr_A);
this->ptr_B = const_cast<void *>(args.ptr_B);
this->ptr_C = const_cast<void *>(args.ptr_C);
this->ptr_D = args.ptr_D;
this->ptr_E = const_cast<void *>(args.ptr_E);
ptr_Aux = const_cast<void *>(args.ptr_Aux);
ptr_Vector = const_cast<void *>(args.ptr_Vector);
this->batch_stride_A = args.batch_stride_A;
this->batch_stride_B = args.batch_stride_B;
this->batch_stride_C = args.batch_stride_C;
this->batch_stride_D = args.batch_stride_D;
this->batch_stride_E = args.batch_stride_E;
this->batch_stride_Aux = args.batch_stride_Aux;
batch_stride_Vector = args.batch_stride_Vector;
output_op = args.epilogue;
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size,
GemmUniversalMode mode,
int split_k_count) {
return Base::can_implement(problem_size, mode, split_k_count);
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size, args.mode, args.batch_count);
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmSparseUniversalWithAbsmax op;
op(params, shared_storage);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
ThreadblockSwizzle threadblock_swizzle;
run_with_swizzle(params, shared_storage, threadblock_swizzle);
}
/// Executes one GEMM with an externally-provided swizzling function
CUTLASS_DEVICE
void run_with_swizzle(Params const ¶ms, SharedStorage &shared_storage, ThreadblockSwizzle& threadblock_swizzle) {
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
ElementE *ptr_E = static_cast<ElementE *>(params.ptr_E);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A / kSparse;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
ptr_E += threadblock_tile_offset.k() * params.batch_stride_E / kSparse;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
ptr_E = static_cast<ElementE * const *>(params.ptr_E)[threadblock_tile_offset.k()];
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k / kSparse,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
cutlass::MatrixCoord tb_offset_E{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k / kSparse / kElementsPerElementE,
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k / kSparse},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
typename Mma::IteratorE iterator_E(
params.params_E,
ptr_E,
{params.problem_size.m(), problem_size_k / kSparse / kElementsPerElementE},
thread_idx,
tb_offset_E);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Number of mainloop iterations over the K extent assigned to this threadblock
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
iterator_E,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
ElementAux * ptr_Aux = static_cast<ElementAux *>(params.ptr_Aux);
ElementVector * ptr_Vector = static_cast<ElementVector *>(params.ptr_Vector);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
if (ptr_Aux) {
ptr_Aux += threadblock_tile_offset.k() * params.batch_stride_Aux;
}
if (ptr_Vector) {
ptr_Vector += threadblock_tile_offset.k() * params.batch_stride_Vector;
}
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
if (ptr_Aux) {
ptr_Aux = static_cast<ElementAux * const *>(params.ptr_Aux)[threadblock_tile_offset.k()];
}
if (ptr_Vector) {
ptr_Vector = static_cast<ElementVector * const *>(params.ptr_Vector)[threadblock_tile_offset.k()];
}
}
// Move to appropriate location for this output tile
if (ptr_Vector) {
ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldvector;
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to auxiliary destination tensor.
typename Epilogue::AuxOutputTileIterator iterator_Aux(
params.params_Aux,
// Only the final block writes the auxiliary tensor
((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) &&
(params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1))
? nullptr
: ptr_Aux,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
// Only the final block uses Vector
((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) &&
(params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1))
? nullptr
: ptr_Vector,
iterator_D,
accumulators,
iterator_C,
iterator_Aux,
params.problem_size.mn(),
threadblock_offset);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/gemm_sparse_universal_with_absmax.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm_sparse_universal_with_absmax.h",
"repo_id": "include",
"token_count": 7433
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Base functionality for common types of sparse GEMM kernel parameters
*/
#pragma once
#include "cutlass/cutlass.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure
template <
typename ThreadblockSwizzle,
typename ParamsA,
typename TensorRefA,
typename ParamsB,
typename TensorRefB,
typename ParamsE,
typename TensorRefE>
struct SparseParamsBase
{
//
// Data members
//
cutlass::gemm::GemmCoord problem_size{};
cutlass::gemm::GemmCoord grid_tiled_shape{};
int swizzle_log_tile;
ParamsA params_A{};
TensorRefA ref_A{};
ParamsB params_B{};
TensorRefB ref_B{};
ParamsE params_E{};
TensorRefE ref_E{};
int gemm_k_iterations{0};
int gemm_k_size{0};
//
// Host dispatch API
//
/// Default constructor
SparseParamsBase() = default;
/// Constructor
CUTLASS_HOST_DEVICE
SparseParamsBase(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
TensorRefA ref_A,
TensorRefB ref_B,
TensorRefE ref_E,
int const mma_shape_k)
:
problem_size(problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(ref_A.layout()),
ref_A(ref_A),
params_B(ref_B.layout()),
ref_B(ref_B),
params_E(ref_E.layout()),
ref_E(ref_E)
{
int total_gemm_k_iterations = (problem_size.k() + mma_shape_k - 1) / mma_shape_k;
int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();
gemm_k_size = gemm_k_iterations * mma_shape_k;
}
};
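// Worked example (illustrative, not from the source): with problem_size.k() = 4096,
// mma_shape_k = 64 and grid_tiled_shape.k() = 3 split-K partitions, the constructor computes
// total_gemm_k_iterations = 4096/64 = 64, gemm_k_iterations = ceil(64/3) = 22 and
// gemm_k_size = 22*64 = 1408, so the first two partitions each cover 1408 columns of K and the
// last partition covers the remaining 1280.
//
//   static_assert((4096 + 64 - 1) / 64 == 64 && (64 + 3 - 1) / 3 == 22 && 22 * 64 == 1408);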
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/params_sparse_base.h/0 | {
"file_path": "include/cutlass/gemm/kernel/params_sparse_base.h",
"repo_id": "include",
"token_count": 1197
} | 32 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/gemm/kernel/static_tile_scheduler.hpp"
namespace cutlass::gemm::kernel::detail {
///////////////////////////////////////////////////////////////////////////////
// Persistent Thread Block (TB) scheduler
class PersistentTileSchedulerSm90:
public StaticPersistentTileScheduler<PersistentTileSchedulerSm90> {
using BaseScheduler = StaticPersistentTileScheduler<PersistentTileSchedulerSm90>;
public:
using StaticPersistentTileScheduler::StaticPersistentTileScheduler;
using Params = PersistentTileSchedulerSm90Params;
using RasterOrder = typename Params::RasterOrder;
using RasterOrderOptions = typename Params::RasterOrderOptions;
using Arguments = BaseScheduler::Arguments;
// get work_idx_m, work_idx_n from blk_per_grid_dim while applying swizzle
static CUTLASS_DEVICE
cute::tuple<int32_t, int32_t>
get_work_idx_m_and_n(
uint64_t blk_per_grid_dim,
FastDivmodU64Pow2 const& divmod_cluster_shape_major,
FastDivmodU64Pow2 const& divmod_cluster_shape_minor,
FastDivmodU64 const& divmod_cluster_blk_major,
int32_t log_swizzle_size,
RasterOrder raster_order) {
auto [cta_m_in_cluster, cta_n_in_cluster, _] = cute::block_id_in_cluster();
return get_work_idx_m_and_n(
blk_per_grid_dim,
divmod_cluster_shape_major,
divmod_cluster_shape_minor,
divmod_cluster_blk_major,
log_swizzle_size,
raster_order,
cta_m_in_cluster,
cta_n_in_cluster
);
}
static CUTLASS_DEVICE
cute::tuple<int32_t, int32_t>
get_work_idx_m_and_n(
uint64_t blk_per_grid_dim,
FastDivmodU64Pow2 const& divmod_cluster_shape_major,
FastDivmodU64Pow2 const& divmod_cluster_shape_minor,
FastDivmodU64 const& divmod_cluster_blk_major,
int32_t log_swizzle_size,
RasterOrder raster_order,
uint64_t cta_m_in_cluster,
uint64_t cta_n_in_cluster) {
uint64_t cluster_id, cluster_major_offset = 0, cluster_minor_offset = 0;
divmod_cluster_shape_major(cluster_id, cluster_major_offset, blk_per_grid_dim);
if (raster_order == RasterOrder::AlongN) {
cluster_minor_offset = cta_m_in_cluster;
}
else {
cluster_minor_offset = cta_n_in_cluster;
}
uint64_t cluster_idx_minor, cluster_idx_major;
uint64_t cluster_idx_minor_div_swizzle, extra, offset;
offset = cluster_id & ((1 << log_swizzle_size) - 1);
extra = cluster_id >> log_swizzle_size;
divmod_cluster_blk_major(cluster_idx_minor_div_swizzle, cluster_idx_major, extra);
cluster_idx_minor = cluster_idx_minor_div_swizzle * (1 << log_swizzle_size) + offset;
auto minor_work_idx = static_cast<int32_t>(cluster_idx_minor * divmod_cluster_shape_minor.divisor +
cluster_minor_offset);
auto major_work_idx = static_cast<int32_t>(cluster_idx_major * divmod_cluster_shape_major.divisor +
cluster_major_offset);
if (raster_order == RasterOrder::AlongN) {
return {minor_work_idx, major_work_idx};
}
else {
return {major_work_idx, minor_work_idx};
}
}
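// Worked example (illustrative; assumes the FastDivmod convention operator()(quotient, remainder,
// dividend)): raster order AlongN, cluster shape 1x2 (minor = M extent 1, major = N extent 2),
// 4 clusters along the major dimension, log_swizzle_size = 0 and CTA offsets (0,0) in the cluster.
// For blk_per_grid_dim = 11: cluster_id = 11/2 = 5 with cluster_major_offset = 1; with no swizzle,
// extra = 5 splits into cluster_idx_minor = 5/4 = 1 and cluster_idx_major = 5%4 = 1; hence the
// minor (M) work index is 1*1 + 0 = 1 and the major (N) work index is 1*2 + 1 = 3, i.e. tile
// (m,n) = (1,3) in an 8-CTA-wide row, consistent with 11 = 1*8 + 3.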
// The basic tile scheduler does not require any additional workspace
template <class ProblemShape, class ElementAccumulator>
static size_t
get_workspace_size(Arguments const&, ProblemShape, KernelHardwareInfo const&, uint32_t, const uint32_t = 1) {
return 0;
}
template <class ProblemShape, class ElementAccumulator>
static cutlass::Status
initialize_workspace(Arguments const&, void*, cudaStream_t, ProblemShape, KernelHardwareInfo const&,
uint32_t, const uint32_t = 1, CudaHostAdapter* cuda_adapter = nullptr) {
return Status::kSuccess;
}
// Kernel helper function to get next work tile
CUTLASS_DEVICE
auto
fetch_next_work(WorkTileInfo work_tile_info) {
if (continue_current_work(work_tile_info)) {
return work_tile_info;
}
advance_to_next_work();
return get_current_work();
}
};
}
| include/cutlass/gemm/kernel/sm90_tile_scheduler.hpp/0 | {
"file_path": "include/cutlass/gemm/kernel/sm90_tile_scheduler.hpp",
"repo_id": "include",
"token_count": 2130
} | 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level batched GEMV assuming expectations about data
layout of the global memory fragments, data types, and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting SIMT instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/platform/platform.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/thread/mma.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/gemm/threadblock/gemv.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/// Template defining default vector-matrix multiply operators inferred from threadblock tile size
/// and global memory data layout.
template <
typename Shape_, /// Shape of the threadblock vector-matrix multiply operator
typename ThreadShape_, /// Shape of per-thread vector-matrix multiply operator
typename ElementA_, /// Element data type of A operand
typename LayoutA_, /// Layout of operand A
typename ElementB_, /// Element data type of B operand
typename LayoutB_, /// Layout of operand B
typename ElementC_, /// Data type of accumulator
typename LayoutC_ /// Layout of accumulator
>
struct DefaultGemvCore {
using Shape = Shape_;
using ThreadShape = ThreadShape_;
using LayoutA = LayoutA_;
using LayoutB = LayoutB_;
using LayoutC = LayoutC_;
using ElementA = ElementA_;
using ElementB = ElementB_;
using ElementC = ElementC_;
static int const kThreadsPerN = Shape::kN / ThreadShape::kN;
using IteratorPolicyA = typename platform::conditional<
platform::is_same<LayoutA, layout::RowMajor>::value,
cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous<
layout::PitchLinearShape<Shape::kK, Shape::kM>, 1, ThreadShape::kK>,
cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided<
layout::PitchLinearShape<Shape::kM, Shape::kK>, 1, ThreadShape::kM>>::type;
using IteratorA = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<Shape::kM, Shape::kK>, ElementA, LayoutA, 1, IteratorPolicyA>;
using IteratorPolicyB = typename platform::conditional<
platform::is_same<LayoutB, layout::RowMajor>::value,
cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreadsPerN, ThreadShape::kN>,
cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreadsPerN, ThreadShape::kK>>::type;
using IteratorB = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<Shape::kK, Shape::kN>, ElementB, LayoutB, 0, IteratorPolicyB>;
using IteratorPolicyC = typename platform::conditional<
platform::is_same<LayoutC, layout::RowMajor>::value,
cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous<
layout::PitchLinearShape<Shape::kN, Shape::kM>, kThreadsPerN, ThreadShape::kN>,
cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided<
layout::PitchLinearShape<Shape::kM, Shape::kN>, kThreadsPerN, ThreadShape::kM>>::type;
using IteratorC = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC, 0, IteratorPolicyC>;
using MmaSimtOp = typename cutlass::gemm::thread::Mma<
cutlass::gemm::GemmShape<ThreadShape::kM, ThreadShape::kN, Shape::kK>,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC>;
using Operator = MmaSimtOp;
// Assertions for correctness
static_assert((Shape::kM == 1), "M=1 is required for GEMV");
static_assert((ThreadShape::kM == 1), "M=1 is required for GEMV");
static_assert(Shape::kK % ThreadShape::kK == 0, "Shape::K must be a multiple of ThreadShape::K");
static_assert(((ThreadShape::kK == 1) ||
(ThreadShape::kK == 2) ||
(ThreadShape::kK == 4) ||
(ThreadShape::kK == 8) ||
(ThreadShape::kK == 16) ||
(ThreadShape::kK == 32)
),
"ThreadShape::K must be a 1, 2, 4, 8, 16 or 32");
};
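// Worked example (illustrative): with Shape = GemmShape<1, 128, 8> and ThreadShape = GemmShape<1, 4, 8>,
// kThreadsPerN = 128/4 = 32, i.e. one warp spans the N extent of the threadblock tile and each
// thread computes a 1x4 slice of the output for every K-step of 8 elements.
//
//   static_assert(128 / 4 == 32, "threads strided across N");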
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/threadblock/default_gemv_core.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/default_gemv_core.h",
"repo_id": "include",
"token_count": 2552
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a multistage threadblock-scoped planar-complex GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/array_planar_complex.h"
#include "cutlass/functional.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/mma_planar_complex_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the planar-complex matrix product using a multistage
/// (cp.async-based) threadblock-scoped software pipeline.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Transformation applied to A
ComplexTransform TransformA = ComplexTransform::kNone,
/// Transformation applied to B
ComplexTransform TransformB = ComplexTransform::kNone
>
class MmaPlanarComplexMultistage :
public MmaPlanarComplexBase<Shape_, Policy_, Stages> {
public:
///< Base class
using Base = MmaPlanarComplexBase<Shape_, Policy_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
///< Policy describing tuning details
using Policy = Policy_;
///< Architecture tag
using ArchTag = arch::Sm80;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
/// Transformation applied to A
static ComplexTransform const kTransformA = TransformA;
/// Transformation applied to B
static ComplexTransform const kTransformB = TransformB;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC = ArrayPlanarComplex<
typename Policy::Operator::FragmentC::Element,
Policy::Operator::FragmentC::kElements
>;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Internal structure exposed for introspection.
struct Detail {
static_assert(Base::kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
/// Number of cp.async instructions to load one stage of operand A
static int const TBLoadIterationsA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const TBLoadIterationsB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
static int const kAccessesPerGroupA =
(TBLoadIterationsA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
static int const kAccessesPerGroupB =
(TBLoadIterationsB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
};
private:
using WarpFragmentA = typename Operator::FragmentA;
using WarpFragmentB = typename Operator::FragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaPlanarComplexMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
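// Worked example (illustrative): with WarpCount = (kM=2, kN=2, kK=2) and warp_idx = 5,
// warp_idx_mn = 5 % 4 = 1 and warp_idx_k = 5 / 4 = 1, giving warp_idx_m = 1 and warp_idx_n = 0;
// this warp therefore starts at warp tile (m=1, n=0) offset by kWarpGemmIterations * 1 along K.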
private:
CUTLASS_DEVICE
void copy_tiles_and_advance(
IteratorA &iterator_A_real,
IteratorA &iterator_A_imag,
IteratorB &iterator_B_real,
IteratorB &iterator_B_imag,
int group_start_A = 0,
int group_start_B = 0) {
iterator_A_real.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector);
iterator_A_imag.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Load for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(this->smem_iterator_A_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
auto gmem_ptr_real = iterator_A_real.get();
auto gmem_ptr_imag = iterator_A_imag.get();
bool pred_guard = iterator_A_real.valid();
cutlass::arch::cp_async<kSrcBytes, kCacheOpA>(
dst_ptr + v,
gmem_ptr_real,
pred_guard);
cutlass::arch::cp_async<kSrcBytes, kCacheOpA>(
dst_ptr + v + (Base::SharedStorage::kImaginaryStrideA / IteratorA::ThreadMap::kElementsPerAccess),
reinterpret_cast<char const *>(gmem_ptr_imag),
pred_guard);
++iterator_A_real;
++iterator_A_imag;
}
++this->smem_iterator_A_;
}
iterator_B_real.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector);
iterator_B_imag.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector);
this->smem_iterator_B_.set_iteration_index(group_start_B);
// Load for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(this->smem_iterator_B_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
auto gmem_ptr_real = iterator_B_real.get();
auto gmem_ptr_imag = iterator_B_imag.get();
bool pred_guard = iterator_B_real.valid();
cutlass::arch::cp_async<kSrcBytes, kCacheOpB>(
dst_ptr + v,
gmem_ptr_real,
pred_guard);
cutlass::arch::cp_async<kSrcBytes, kCacheOpB>(
dst_ptr + v + (Base::SharedStorage::kImaginaryStrideB / IteratorB::ThreadMap::kElementsPerAccess),
reinterpret_cast<char const *>(gmem_ptr_imag),
pred_guard);
++iterator_B_real;
++iterator_B_imag;
}
++this->smem_iterator_B_;
}
}
CUTLASS_DEVICE
void warp_mma_planar_complex(
Operator & warp_mma,
FragmentC &accum,
WarpFragmentA const & real_A,
WarpFragmentA const & imag_A,
WarpFragmentB const & real_B,
WarpFragmentB const & imag_B) {
cutlass::negate<Array<typename WarpFragmentB::Element, WarpFragmentB::kElements>> neg_op_B;
WarpFragmentB neg_real_B = neg_op_B(real_B);
WarpFragmentB neg_imag_B = neg_op_B(imag_B);
warp_mma(accum.real, real_A, real_B, accum.real);
if (kTransformB == ComplexTransform::kNone) {
warp_mma(accum.imag, real_A, imag_B, accum.imag);
}
else {
warp_mma(accum.imag, real_A, neg_imag_B, accum.imag);
}
if (kTransformA == ComplexTransform::kNone) {
warp_mma(accum.imag, imag_A, real_B, accum.imag);
}
else {
warp_mma(accum.imag, imag_A, neg_real_B, accum.imag);
}
if ((kTransformA == ComplexTransform::kNone) ^ (kTransformB == ComplexTransform::kNone)) {
warp_mma(accum.real, imag_A, imag_B, accum.real);
}
else {
warp_mma(accum.real, imag_A, neg_imag_B, accum.real);
}
}
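// The sequence above computes the planar-complex product
//   (A_r + i*A_i') * (B_r + i*B_i'),
// where A_i' and B_i' are the imaginary parts after the optional conjugation selected
// by kTransformA / kTransformB (conjugation negates the imaginary part). With no
// conjugation this reduces to
//   accum.real += A_r*B_r - A_i*B_i
//   accum.imag += A_r*B_i + A_i*B_r,
// and the pre-negated B fragments supply the sign flips so that every term is issued
// as an ordinary warp-level MMA.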
public:
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A_real,
///< iterator over A operand in global memory
IteratorA iterator_A_imag,
///< iterator over B operand in global memory
IteratorB iterator_B_real,
///< iterator over B operand in global memory
IteratorB iterator_B_imag,
///< initial value of accumulator
FragmentC const &src_accum) {
//
// Prologue
//
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations) {
iterator_A_real.clear_mask(gemm_k_iterations == 0);
iterator_A_imag.clear_mask(gemm_k_iterations == 0);
iterator_B_real.clear_mask(gemm_k_iterations == 0);
iterator_B_imag.clear_mask(gemm_k_iterations == 0);
iterator_A_real.set_iteration_index(0);
iterator_A_imag.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Load for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8;
bool pred_guard = iterator_A_real.valid();
auto src_ptr_real = iterator_A_real.get();
auto src_ptr_imag = iterator_A_imag.get();
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, src_ptr_real, pred_guard);
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v +
Base::SharedStorage::kImaginaryStrideA /
IteratorA::ThreadMap::kElementsPerAccess,
reinterpret_cast<char const *>(src_ptr_imag),
pred_guard);
++iterator_A_real;
++iterator_A_imag;
}
++this->smem_iterator_A_;
}
iterator_B_real.set_iteration_index(0);
iterator_B_imag.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// Load for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(this->smem_iterator_B_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8;
bool pred_guard = iterator_B_real.valid();
auto src_ptr_real = iterator_B_real.get();
auto src_ptr_imag = iterator_B_imag.get();
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, src_ptr_real, pred_guard);
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v +
Base::SharedStorage::kImaginaryStrideB /
IteratorB::ThreadMap::kElementsPerAccess,
reinterpret_cast<char const *>(src_ptr_imag),
pred_guard);
++iterator_B_real;
++iterator_B_imag;
}
++this->smem_iterator_B_;
}
// Move to the next stage
iterator_A_real.add_tile_offset({0, 1});
iterator_A_imag.add_tile_offset({0, 1});
iterator_B_real.add_tile_offset({1, 0});
iterator_B_imag.add_tile_offset({1, 0});
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Inserts a memory fence between stages of cp.async instructions
cutlass::arch::cp_async_fence();
}
// Perform accumulation in the 'd' output operand
accum = src_accum;
// Blocks until all but kStages-2 cp.async stages have committed.
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpFragmentA warp_frag_real_A[2];
WarpFragmentA warp_frag_imag_A[2];
WarpFragmentB warp_frag_real_B[2];
WarpFragmentB warp_frag_imag_B[2];
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_frag_real_A[0]);
this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[0], Base::SharedStorage::kImaginaryStrideA);
this->warp_tile_iterator_B_.load(warp_frag_real_B[0]);
this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[0], Base::SharedStorage::kImaginaryStrideB);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
iterator_A_real.clear_mask(gemm_k_iterations == 0);
iterator_A_imag.clear_mask(gemm_k_iterations == 0);
iterator_B_real.clear_mask(gemm_k_iterations == 0);
iterator_B_imag.clear_mask(gemm_k_iterations == 0);
// Start issuing the first group of the next stage outside of the mainloop
copy_tiles_and_advance(iterator_A_real, iterator_A_imag, iterator_B_real, iterator_B_imag);
Operator warp_mma;
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations;
++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping back to the start of the
// k dimension if this is the last group within the stage.
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_frag_real_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideA);
this->warp_tile_iterator_B_.load(warp_frag_real_B[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideB);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
// Issue global->shared copies for the next stage
int group_start_iteration_A, group_start_iteration_B;
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
group_start_iteration_A = 0;
group_start_iteration_B = 0;
}
else {
group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA;
group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB;
}
copy_tiles_and_advance(
iterator_A_real,
iterator_A_imag,
iterator_B_real,
iterator_B_imag,
group_start_iteration_A,
group_start_iteration_B);
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
// Inserts a memory fence between stages of cp.async instructions
cutlass::arch::cp_async_fence();
// Blocks until all but kStages-2 cp.async stages have committed.
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A_real.add_tile_offset({0, 1});
iterator_A_imag.add_tile_offset({0, 1});
iterator_B_real.add_tile_offset({1, 0});
iterator_B_imag.add_tile_offset({1, 0});
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations;
iterator_A_real.clear_mask(gemm_k_iterations == 0);
iterator_A_imag.clear_mask(gemm_k_iterations == 0);
iterator_B_real.clear_mask(gemm_k_iterations == 0);
iterator_B_imag.clear_mask(gemm_k_iterations == 0);
}
warp_mma_planar_complex(
warp_mma,
accum,
warp_frag_real_A[warp_mma_k % 2],
warp_frag_imag_A[warp_mma_k % 2],
warp_frag_real_B[warp_mma_k % 2],
warp_frag_imag_B[warp_mma_k % 2]);
}
}
// Commit and drain all pending and predicated cp.async operations from the GEMM mainloop
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
};
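// A minimal usage sketch (illustrative only; the kernel wiring, the iterator construction,
// and the names Mma, shared_storage, and accumulators are assumptions, not part of this
// header):
//
//   int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
//   Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
//   mma(gemm_k_iterations, accumulators,
//       iterator_A_real, iterator_A_imag,
//       iterator_B_real, iterator_B_imag,
//       accumulators);
//
// where Mma is a concrete MmaPlanarComplexMultistage specialization.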
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/mma_planar_complex_multistage.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/mma_planar_complex_multistage.h",
"repo_id": "include",
"token_count": 9412
} | 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
This is a work in progress.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/mma.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting Volta Tensor Cores.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Used for partial specialization
typename Enable = bool
>
class MmaVoltaTensorOp {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = ElementC_;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Architecture tag
using ArchTag = arch::Sm70;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Indicates math operator
using MathOperator = typename ArchMmaOperator::Operator;
/// Underlying instruction shape
using InstructionShape = typename ArchMmaOperator::Shape;
/// Complex transform on A operand
static ComplexTransform const kTransformA = ComplexTransform::kNone;
/// Complex transform on B operand
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
/// interleaved 32x32 tiles
using InterleavedTileShape = GemmShape<32, 32, 4>;
static_assert(!(Shape::kM % InterleavedTileShape::kM) &&
!(Shape::kN % InterleavedTileShape::kN),
"Shape must be a multiple of InterleavedTileShape.");
public:
/// Iterates over the A operand in memory
using IteratorA = MmaVoltaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<
ArchMmaOperator::Shape::kM,
ArchMmaOperator::Shape::kK
>,
Policy::OpDelta::kRow,
kThreadCount
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Iterates over the B operand in memory
using IteratorB = MmaVoltaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<
ArchMmaOperator::Shape::kK,
ArchMmaOperator::Shape::kN
>,
Policy::OpDelta::kRow,
kThreadCount
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Iterates over the C operand in memory
using IteratorC = MmaVoltaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta
>;
/// Storage for C tile
using FragmentC = typename IteratorC::Fragment;
private:
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
InterleavedTileShape::kM / ArchMmaOperator::Shape::kM,
InterleavedTileShape::kN / ArchMmaOperator::Shape::kN
>;
using TileIterations = MatrixShape<
Shape::kM / InterleavedTileShape::kM,
Shape::kN / InterleavedTileShape::kN
>;
// Whether matrix B is reordered
bool reorder_B_;
public:
/// Underlying matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaVoltaTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) {
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
D = C;
MmaOperandA const *ptr_A = reinterpret_cast<MmaOperandA const *>(&A);
MmaOperandB const *ptr_B = reinterpret_cast<MmaOperandB const *>(&B);
MmaOperandC *ptr_D = reinterpret_cast<MmaOperandC *>(&D);
CUTLASS_PRAGMA_UNROLL
for (int outer_col = 0; outer_col < TileIterations::kColumn; ++outer_col) {
CUTLASS_PRAGMA_UNROLL
for (int inner_col = 0; inner_col < MmaIterations::kColumn; ++inner_col) {
CUTLASS_PRAGMA_UNROLL
for (int outer_row = 0; outer_row < TileIterations::kRow; ++outer_row) {
CUTLASS_PRAGMA_UNROLL
for (int inner_row = 0; inner_row < MmaIterations::kRow; ++inner_row) {
int op_col = inner_col + MmaIterations::kColumn * outer_col;
// Column-major serpentine sequence to maximize reuse of A operand.
int inner_row_serp = inner_row;
int outer_row_serp = outer_row;
if (op_col & 1) {
inner_row_serp = MmaIterations::kRow - inner_row - 1;
outer_row_serp = TileIterations::kRow - outer_row - 1;
}
int op_row = inner_row_serp + MmaIterations::kRow * outer_row_serp;
int op_idx = inner_row_serp + MmaIterations::kRow *
(inner_col + MmaIterations::kColumn *
(outer_row_serp + TileIterations::kRow * outer_col));
mma(
ptr_D[op_idx],
ptr_A[op_row],
ptr_B[op_col],
ptr_D[op_idx]);
}
}
}
}
}
};
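// A minimal warp-scope usage sketch (illustrative only; MmaVolta, iter_A, and iter_B are
// placeholder names for a concrete MmaVoltaTensorOp specialization and its IteratorA /
// IteratorB instances over shared-memory tiles):
//
//   MmaVolta mma;
//   typename MmaVolta::FragmentA frag_A;
//   typename MmaVolta::FragmentB frag_B;
//   typename MmaVolta::FragmentC accum;
//   accum.clear();
//   iter_A.load(frag_A);
//   iter_B.load(frag_B);
//   mma(accum, frag_A, frag_B, accum);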
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/warp/mma_tensor_op_sm70.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_tensor_op_sm70.h",
"repo_id": "include",
"token_count": 3138
} | 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over one or more ranks of an affine tensor
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/fast_math.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/device_kernel.h"
#include "cutlass/reduction/thread/reduction_operators.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
/// Parameters structure
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
struct TensorReductionAffineStridedParams {
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
Coord<kRank> extent; /// Extent of source tensor
FastDivmodU64 divmod[kRank - 1]; /// FastDivmod by each strided rank
int64_t dst_stride[kReducedRank - 1]; /// stride (units of bytes) - I, J
int64_t src_stride[kRank - 1]; /// stride (units of bytes) - I, J, K
int64_t workspace_stride; /// stride (units of bytes) between workspace
int64_t workspace_outer_stride; /// stride (units of bytes) between 'rows' of the workspace
int workspace_count; /// number of workspaces
uint64_t inner_count; /// Number of elements in reduced index space
uint64_t outer_count; /// Number of elements in outer index space
ElementOutput * destination; /// Pointer to output tensor of rank kReducedRank
ElementSource const * source; /// Pointer to source pointer of rank kRank
ReductionOp reduction_op; /// Reduction operator
ElementCompute reduction_identity; /// Identity element for reduction operator
ElementCompute *device_workspace; /// Pointer to device workspace for inter-CTA reductions
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
TensorReductionAffineStridedParams() {
}
/// Ctor
TensorReductionAffineStridedParams(
Coord<kRank> extent_, ///< Extent of source tensor
ElementOutput * dst_ptr_, ///< Output tensor data
int64_t dst_stride_[], ///< Stride (units of elements)
ElementSource const * src_ptr_, ///< Source tensor data
int64_t src_stride_[], ///< Stride (units of elements)
ElementCompute *device_workspace_, ///< Pointer to device workspace for inter-CTA reductions
int64_t workspace_stride_, ///< Stride between workspaces
int workspace_count_, ///< Number of workspaces
ReductionOp reduction_op_, ///< Reduction operator
ElementCompute reduction_identity_ = ElementCompute() ///< Identity element for reduction operator
):
extent(extent_),
inner_count(1),
outer_count(1),
destination(dst_ptr_),
source(src_ptr_),
device_workspace(device_workspace_),
workspace_outer_stride(0),
workspace_stride(workspace_stride_),
workspace_count(workspace_count_),
reduction_op(reduction_op_),
reduction_identity(reduction_identity_) {
// Initialize divisors for fast div-mod
for (int p = 1; p < kRank; ++p) {
divmod[p - 1] = FastDivmodU64(uint64_t(extent[p]));
}
int input_size_bits = sizeof_bits<ElementSource>::value;
int output_size_bits = sizeof_bits<ElementOutput>::value;
workspace_outer_stride = workspace_stride * workspace_count;
// Compute strides in units of bytes
for (int p = 0; p < kReducedRank - 1; ++p) {
dst_stride[p] = dst_stride_[p] * output_size_bits / 8;
}
for (int p = 0; p < kRank - 1; ++p) {
src_stride[p] = src_stride_[p] * input_size_bits / 8;
}
// Compute number of elements in strided ranks
for (int p = 0; p < kReducedRank - 1; ++p) {
outer_count *= uint64_t(extent[p]);
}
for (int p = 0; p < kInnerRank; ++p) {
inner_count *= uint64_t(extent[kReducedRank + p - 1]);
}
}
};
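// Concrete example of the index-space split: reducing a rank-5 NDHWC tensor to a
// rank-2 NC output (kRank = 5, kReducedRank = 2) gives kInnerRank = 3,
// outer_count = N, and inner_count = D*H*W. The contiguous C rank is never part of
// the reduction here, which is what keeps loads and stores vectorizable, and the
// divmod[] table lets the kernels turn a linear index back into a coordinate
// without integer division.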
/// Kernel to reduce a tensor with affine layout over a set of ranks *EXCLUDING* the contiguous
/// rank. This leads to favorable vectorized memory accesses over the contiguous rank.
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
class TensorReductionAffineStrided {
public:
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
using ComputeFragment = Array<ElementCompute, VectorLength>;
using SourceFragment = AlignedArray<ElementSource, VectorLength>;
using OutputFragment = AlignedArray<ElementOutput, VectorLength>;
/// Shared memory allocation used for reduction within the CTA
struct SharedStorage {
Array<ElementCompute, kThreads * kVectorLength> workspace;
};
/// Parameters structure
using Params = TensorReductionAffineStridedParams<
Rank,
ReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
VectorLength,
ElementCompute,
Threads,
BatchSize
>;
private:
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_inner_coord_and_offset_(
Params const ¶ms,
Coord<kInnerRank> & coord,
int64_t &src_offset,
uint64_t linear_idx) const {
// Decompose into coordinate
coord = CoordinateDecomposition<kInnerRank>(linear_idx, ¶ms.divmod[kReducedRank - 1]);
// Compute linear offset
src_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kInnerRank; ++i) {
src_offset += params.src_stride[kReducedRank + i - 1] * coord[i];
}
}
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_outer_coord_and_offset_(
Params const ¶ms,
Coord<kReducedRank - 1> & coord,
int64_t &dst_offset,
int64_t &src_offset,
uint64_t linear_idx) const {
// Decompose linear coordinate
coord = CoordinateDecomposition<kReducedRank - 1>(linear_idx, params.divmod);
// Compute offset into tensors
dst_offset = 0;
src_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kReducedRank - 1; ++i) {
dst_offset += params.dst_stride[i] * coord[i];
src_offset += params.src_stride[i] * coord[i];
}
}
/// Reduces over the reduction indices
CUTLASS_DEVICE
ComputeFragment reduce_indices_(
Params const ¶ms,
ElementCompute *threadblock_workspace,
char const *src_byte_ptr) {
NumericArrayConverter<ElementCompute, ElementSource, VectorLength> convert_source;
ReductionOp reduction_op(params.reduction_op);
// Accumulated output
ComputeFragment identity_frag;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < int(identity_frag.size()); ++i) {
identity_frag[i] = params.reduction_identity;
}
if (!params.inner_count) {
return identity_frag;
}
ComputeFragment accumulator = identity_frag;
// Compute the coordinate of the first access
int64_t src_byte_offset = 0;
Coord<kInnerRank> coord;
uint64_t linear_idx = threadIdx.z + blockIdx.z * blockDim.z;
compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx);
// Load the first vector
SourceFragment source_fragment[kBatchSize];
bool not_done = true;
// Iterate over vectors in a linearized reduction index space
while (not_done) {
bool guards[kBatchSize];
// Issue a batch of loads
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
if (linear_idx < params.inner_count) {
source_fragment[b] = *reinterpret_cast<SourceFragment const *>(src_byte_ptr + src_byte_offset);
guards[b] = true;
}
else {
guards[b] = false;
not_done = false;
}
linear_idx += blockDim.z * gridDim.z;
compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx);
}
// Perform a batch of reduction operations
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
if (guards[b]) {
auto cvt = convert_source(source_fragment[b]);
accumulator = cutlass::reduction::thread::detail::ApplyArrayOperator(
reduction_op,
accumulator,
cvt);
}
}
}
// Optional reduction within a CTA
if (blockDim.z > 1) {
// Linearized thread ID
int thread_idx = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
// all threads store to workspace
ComputeFragment *frag_ptr = reinterpret_cast<ComputeFragment *>(threadblock_workspace);
frag_ptr[thread_idx] = accumulator;
__syncthreads();
if (threadIdx.z == 0) {
// Load all additional block indices
for (int z = 1; z < blockDim.z; ++z) {
ComputeFragment frag = frag_ptr[thread_idx + z * blockDim.x * blockDim.y];
accumulator = cutlass::reduction::thread::detail::ApplyArrayOperator(
reduction_op,
accumulator,
frag);
}
}
__syncthreads();
}
return accumulator;
}
public:
/// Perform a reduction
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
int coord_c = (blockIdx.x * blockDim.x + threadIdx.x) * kVectorLength;
char const * src_byte_ptr = reinterpret_cast<char const *>(params.source + coord_c);
char * dst_byte_ptr = nullptr;
// If performing a reduction across CTAs, redirect output to device workspace
if (gridDim.z == 1) {
dst_byte_ptr = reinterpret_cast<char *>(params.destination + coord_c);
}
else {
dst_byte_ptr = reinterpret_cast<char *>(params.device_workspace + coord_c);
}
// If the C index is out of bounds, exit
if (coord_c >= params.extent[kRank - 1]) {
return;
}
int64_t idx_linear = blockIdx.y * blockDim.y + threadIdx.y;
// Use modulo division to compute location
Coord<kReducedRank - 1> outer_coord;
int64_t dst_byte_offset;
int64_t src_byte_offset;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
if (gridDim.z == 1) {
/// Complete the reduction with no workspace
while (idx_linear < params.outer_count) {
ComputeFragment result;
result = reduce_indices_(
params,
shared_storage.workspace.data(),
src_byte_ptr + src_byte_offset);
// Store the result after possible final reduction within the CTA
if (threadIdx.z == 0) {
// Convert to output type and store
NumericArrayConverter<ElementOutput, ElementCompute, VectorLength> convert_output;
auto cvt = convert_output(result);
*reinterpret_cast<OutputFragment *>(dst_byte_ptr + dst_byte_offset) =
reinterpret_cast<OutputFragment const &>(cvt);
}
// Update indices and pointers
idx_linear += gridDim.y * blockDim.y;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
} // while
}
else {
/// Complete the reduction with a device workspace
while (idx_linear < params.outer_count) {
ComputeFragment result;
result = reduce_indices_(
params,
shared_storage.workspace.data(),
src_byte_ptr + src_byte_offset);
// Store the result after possible final reduction within the CTA
if (threadIdx.z == 0) {
int64_t byte_offset =
blockIdx.z * params.workspace_stride + idx_linear * params.workspace_outer_stride;
// No conversion - store in compute type
*reinterpret_cast<ComputeFragment *>(dst_byte_ptr + byte_offset) =
reinterpret_cast<ComputeFragment const &>(result);
}
// Update indices and pointers
idx_linear += gridDim.y * blockDim.y;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
} // while (outer index)
} // if ()
}
};
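// When the launch uses gridDim.z > 1, each z-slice above writes a partial result into
// params.device_workspace, and TensorReductionAffineStridedFinal (below) must run
// afterwards to fold the workspace into params.destination. A rough host-side sketch
// (illustrative only; the grid/block shapes are placeholders, and CUTLASS normally
// performs this wiring in its device-level reduction utilities):
//
//   Kernel1::Params params(extent, dst_ptr, dst_stride, src_ptr, src_stride,
//                          workspace_ptr, workspace_stride, workspace_count, reduction_op);
//   cutlass::Kernel<Kernel1><<<grid1, block1, smem1, stream>>>(params);
//   cutlass::Kernel<Kernel2><<<grid2, block2, smem2, stream>>>(params);
//
// where Kernel1 / Kernel2 name matching TensorReductionAffineStrided /
// TensorReductionAffineStridedFinal specializations.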
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to perform final reduction
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
class TensorReductionAffineStridedFinal {
public:
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
using ComputeFragment = Array<ElementCompute, VectorLength>;
using SourceFragment = AlignedArray<ElementSource, VectorLength>;
using OutputFragment = AlignedArray<ElementOutput, VectorLength>;
/// Shared memory
struct SharedStorage { };
/// Parameters structure
using Params = TensorReductionAffineStridedParams<
Rank,
ReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
VectorLength,
ElementCompute,
Threads,
BatchSize
>;
private:
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_outer_coord_and_offset_(
Params const ¶ms,
Coord<kReducedRank - 1> & coord,
int64_t &dst_offset,
uint64_t linear_idx) const {
// Decompose linear index
coord = CoordinateDecomposition<kReducedRank - 1>(linear_idx, params.divmod);
// Compute tensor offset
dst_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kReducedRank - 1; ++i) {
dst_offset += params.dst_stride[i] * coord[i];
}
}
/// Reduces over the reduction indices
CUTLASS_DEVICE
ComputeFragment reduce_indices_(
Params const ¶ms,
char *src_byte_ptr) {
ReductionOp reduction_op(params.reduction_op);
// Accumulated output
ComputeFragment identity_frag;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < int(identity_frag.size()); ++i) {
identity_frag[i] = params.reduction_identity;
}
ComputeFragment accumulator = identity_frag;
ComputeFragment workspace_fragments[kBatchSize];
// Partially unrolled loop
for (int idx = 0; idx < params.workspace_count; idx += kBatchSize) {
// Issue a batch of loads
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
if (idx + b < params.workspace_count) {
workspace_fragments[b] =
*reinterpret_cast<ComputeFragment *>(src_byte_ptr);
}
else {
workspace_fragments[b] = identity_frag;
}
src_byte_ptr += params.workspace_stride;
}
// Perform a reduction
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kVectorLength; ++i) {
accumulator[i] = reduction_op(accumulator[i], workspace_fragments[b][i]);
}
}
}
return accumulator;
}
public:
//
// Methods
//
/// Perform a reduction
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
int coord_c = (blockIdx.x * blockDim.x + threadIdx.x) * kVectorLength;
char * src_byte_ptr = reinterpret_cast<char *>(params.device_workspace + coord_c);
char * dst_byte_ptr = reinterpret_cast<char *>(params.destination + coord_c);
// If the C index is out of bounds, exit
if (coord_c >= params.extent[kRank - 1]) {
return;
}
int64_t idx_linear = blockIdx.y * blockDim.y + threadIdx.y;
// Use modulo division to compute location
Coord<kReducedRank - 1> outer_coord;
int64_t dst_byte_offset;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
idx_linear);
/// Complete the reduction
while (idx_linear < params.outer_count) {
int64_t src_byte_offset = idx_linear * params.workspace_outer_stride;
ComputeFragment result = reduce_indices_(
params,
src_byte_ptr + src_byte_offset);
// Convert to output type and store
NumericArrayConverter<ElementOutput, ElementCompute, VectorLength> convert_output;
auto cvt = convert_output(result);
*reinterpret_cast<OutputFragment *>(dst_byte_ptr + dst_byte_offset) =
reinterpret_cast<OutputFragment const &>(cvt);
// Update indices and pointers
idx_linear += gridDim.y * blockDim.y;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
idx_linear);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace reduction
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/reduction/kernel/tensor_reduce_affine_strided.h/0 | {
"file_path": "include/cutlass/reduction/kernel/tensor_reduce_affine_strided.h",
"repo_id": "include",
"token_count": 8525
} | 37 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Transform Kernel Universal adapter
*/
#pragma once
// common
#include "cutlass/cutlass.h"
#include "cutlass/cluster_launch.hpp"
#include "cutlass/device_kernel.h"
#include "cutlass/cuda_host_adapter.hpp"
namespace cutlass::transform::device {
template <class TransformKernel_>
class TransformUniversalAdapter
{
public:
using TransformKernel = TransformKernel_;
using Arguments = typename TransformKernel::Arguments;
using Params = typename TransformKernel::Params;
private:
Params params_;
static constexpr bool const EnableCudaHostAdapter = CUTLASS_ENABLE_CUDA_HOST_ADAPTER;
public:
Params const& params() const {
return this->params_;
}
static Status
can_implement(Arguments const& args) {
return TransformKernel::can_implement(args);
}
static size_t
get_workspace_size(Arguments const& args) {
return TransformKernel::get_workspace_size(args);
}
static dim3
get_grid_shape(Arguments const& args, void* workspace = nullptr) {
auto tmp_params = TransformKernel::to_underlying_arguments(args, workspace);
return TransformKernel::get_grid_shape(tmp_params);
}
static dim3
get_grid_shape(Params const& params) {
return TransformKernel::get_grid_shape(params);
}
Status
initialize(
Arguments & args,
void* workspace = nullptr,
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr) {
CUTLASS_TRACE_HOST("TransformUniversalAdapter::initialize() - workspace "
<< workspace << ", stream: " << (stream ? "non-null" : "null"));
// Initialize the workspace
Status status = TransformKernel::initialize_workspace(args, workspace, stream, cuda_adapter);
if (status != Status::kSuccess) {
return status;
}
// Initialize the Params structure
this->params_ = TransformKernel::to_underlying_arguments(args, workspace);
// Don't set the function attributes - require the CudaHostAdapter to set it.
if constexpr (EnableCudaHostAdapter) {
CUTLASS_ASSERT(cuda_adapter);
return Status::kSuccess;
}
else {
//
// Account for dynamic smem capacity if needed
//
int smem_size = TransformKernel::SharedStorageSize;
CUTLASS_ASSERT(cuda_adapter == nullptr);
if (smem_size >= (48 << 10)) {
CUTLASS_TRACE_HOST(" Setting smem size to " << smem_size);
cudaError_t result = cudaFuncSetAttribute(
device_kernel<TransformKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (cudaSuccess != result) {
result = cudaGetLastError();
CUTLASS_TRACE_HOST(" cudaFuncSetAttribute() returned error: " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
}
}
return Status::kSuccess;
}
static Status
run(
Params& params,
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr,
int32_t kernel_index = 0) {
CUTLASS_TRACE_HOST("TransformUniversalAdapter::run()");
dim3 const block = TransformKernel::get_block_shape();
dim3 const grid = get_grid_shape(params);
// Currently only support 1x1x1 for transform kernel.
dim3 const cluster = {1,1,1};
// configure smem size and carveout
int smem_size = TransformKernel::SharedStorageSize;
Status launch_result;
// Use extended launch API only for mainloops that use it
if constexpr(TransformKernel::ArchTag::kMinComputeCapability >= 90) {
void* kernel_params[] = {¶ms};
if constexpr (EnableCudaHostAdapter) {
//
// Use the cuda host adapter
//
CUTLASS_ASSERT(cuda_adapter);
if (cuda_adapter) {
launch_result = cuda_adapter->launch(
grid, cluster, block, smem_size, stream, kernel_params, kernel_index);
}
else {
return Status::kErrorInternal;
}
}
else {
CUTLASS_ASSERT(cuda_adapter == nullptr);
void const* kernel = (void const*) device_kernel<TransformKernel>;
launch_result = ClusterLauncher::launch(
grid, cluster, block, smem_size, stream, kernel, kernel_params);
}
}
else {
launch_result = Status::kSuccess;
if constexpr (EnableCudaHostAdapter) {
CUTLASS_ASSERT(cuda_adapter);
if (cuda_adapter) {
void* kernel_params[] = {¶ms};
launch_result = cuda_adapter->launch(
grid, block, smem_size, stream, kernel_params, 0);
}
else {
return Status::kErrorInternal;
}
}
else {
CUTLASS_ASSERT(cuda_adapter == nullptr);
device_kernel<TransformKernel><<<grid, block, smem_size, stream>>>(params);
}
}
cudaError_t result = cudaGetLastError();
if (cudaSuccess == result && Status::kSuccess == launch_result) {
return Status::kSuccess;
} else {
CUTLASS_TRACE_HOST(" Kernel launch failed. Reason: " << result);
return Status::kErrorInternal;
}
}
Status
run(
Arguments & args,
void* workspace = nullptr,
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr,
int32_t kernel_index = 0) {
Status status = initialize(args, workspace, stream, cuda_adapter);
if (Status::kSuccess == status) {
status = run(this->params_, stream, cuda_adapter, kernel_index);
}
return status;
}
Status
operator()(
Arguments & args,
void* workspace = nullptr,
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr) {
return run(args, workspace, stream, cuda_adapter);
}
Status
run(
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr) {
return run(this->params_, stream, cuda_adapter);
}
Status
operator()(
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr) {
return run(this->params_, stream, cuda_adapter);
}
};
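// A minimal usage sketch (illustrative only; SomeTransformKernel, stream, and
// CUTLASS_CHECK are placeholders for a concrete kernel type, a CUDA stream, and an
// error-checking helper):
//
//   using Transform = cutlass::transform::device::TransformUniversalAdapter<SomeTransformKernel>;
//   Transform::Arguments args = /* kernel-specific arguments */;
//   Transform op;
//   size_t workspace_bytes = Transform::get_workspace_size(args);
//   cutlass::device_memory::allocation<uint8_t> workspace(workspace_bytes);
//   CUTLASS_CHECK(op.initialize(args, workspace.get(), stream));
//   CUTLASS_CHECK(op.run(stream));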
} // namespace cutlass::transform::device
| include/cutlass/transform/device/transform_universal_adapter.hpp/0 | {
"file_path": "include/cutlass/transform/device/transform_universal_adapter.hpp",
"repo_id": "include",
"token_count": 2826
} | 38 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cstdint>
#include <string>
#define CUTLASS_MAJOR 3
#define CUTLASS_MINOR 5
#define CUTLASS_PATCH 1
#ifdef CUTLASS_VERSIONS_GENERATED
#include "cutlass/version_extended.h"
#else
#define CUTLASS_BUILD 0
#define CUTLASS_REVISION ""
#endif
#define CUTLASS_VERSION ((CUTLASS_MAJOR)*100 + (CUTLASS_MINOR)*10 + CUTLASS_PATCH)
namespace cutlass {
inline constexpr uint32_t getVersion() {
return CUTLASS_VERSION;
}
inline constexpr uint32_t getVersionMajor() {
return CUTLASS_MAJOR;
}
inline constexpr uint32_t getVersionMinor() {
return CUTLASS_MINOR;
}
inline constexpr uint32_t getVersionPatch() {
return CUTLASS_PATCH;
}
inline constexpr uint32_t getVersionBuild() {
return CUTLASS_BUILD + 0;
}
inline std::string getVersionString() {
std::string version = "@CUTLASS_VERSION@";
if (getVersionBuild()) {
version += "." + std::to_string(getVersionBuild());
}
return version;
}
inline std::string getGitRevision() {
return "@CUTLASS_REVISION@";
}
} // namespace cutlass
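// A minimal usage sketch (illustrative only):
//
//   #include <iostream>
//   #include "cutlass/version.h"
//
//   int main() {
//     std::cout << "CUTLASS " << cutlass::getVersionString()
//               << " (encoded version " << cutlass::getVersion() << ")" << std::endl;
//     return 0;
//   }
//
// Note that getVersionString() yields a concrete version string only after CMake has
// substituted the @CUTLASS_VERSION@ placeholder during configuration.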
| include/cutlass/version.h/0 | {
"file_path": "include/cutlass/version.h",
"repo_id": "include",
"token_count": 888
} | 39 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Ease-of-use interface for constructing, compiling, and running GEMMs.
The ``GroupedGemm`` interface is meant to allow one to easily instantiate, compile, and run
grouped GEMM operations in CUTLASS via Python, without specifying many configuration parameters.
Under the hood, the interface will select sensible default parameters for the many template
parameters for CUTLASS grouped GEMMs.
Note: optimal performance is not to be expected from this interface. To achieve optimal
performance, one should specify and tune each configuration parameter.
The simplest example of using this interface is the following:
.. highlight:: python
.. code-block:: python
# As, Bs, Cs, and Ds are torch/numpy/cupy tensor objects
plan = cutlass.op.GroupedGemm(element=cutlass.DataType.f16, layout=cutlass.LayoutType.RowMajor)
plan.run([A0, A1], [B0, B1], [C0, C1], [D0, D1])
"""
from cutlass_library import DataTypeSize
from cuda import cuda
from cutlass.backend.gemm_operation import (
GemmGroupedArguments,
GemmOperationGrouped,
)
from cutlass.backend.library import (
SchedulerMode,
TensorDescription,
TileDescription,
)
from cutlass.op.gemm import Gemm
from cutlass.shape import GemmCoord
from cutlass.utils import check, datatypes
class GroupedGemm(Gemm):
"""
Constructs a ``GroupedGemm`` object.
The data types and layouts of operands A, B, and C, along with the data type of output D
and that used for accumulation, are bound to the ``GroupedGemm`` object throughout its lifetime --
these are not to be changed after a ``GroupedGemm`` has been constructed.
The constructor has optional parameters for flexibly setting these values. Please see the constructor
for ``Gemm`` for examples of these.
:param cc: compute capability of device to generate kernels for
:type cc: int
:param A: tensor representing data type and layout of operands A
:param B: tensor representing data type and layout of operands B
:param C: tensor representing data type and layout of operands C
:param D: tensor representing data type and layout of operands D
:param alpha: scalar parameter alpha from GEMM computation that scales the product of operands A and B
:param beta: scalar parameter beta from GEMM operation that scales operand C
:param element_accumulator: data type to be used in accumulation of the product of operands A and B
:type element_accumulator: cutlass.DataType
:param element: generic data type to be used for operands A, B, C, D, as well as the accumulation data type
:type element: cutlass.DataType
:param layout: generic layout type to be used for operands A, B, C, and D
:type layout: cutlass.LayoutType
:param element_A: data type to be used for operand A
:type element_A: cutlass.DataType
:param element_B: data type to be used for operand B
:type element_B: cutlass.DataType
:param element_C: data type to be used for operand C
:type element_C: cutlass.DataType
:param element_D: data type to be used for operand D
:type element_D: cutlass.DataType
:param layout_A: layout of operand A
:type layout_A: cutlass.LayoutType
:param layout_B: layout of operand B
:type layout_B: cutlass.LayoutType
:param layout_C: layout of operand C
:type layout_C: cutlass.LayoutType
:param layout_D: layout of operand D
:type layout_D: cutlass.LayoutType
"""
def __init__(
self, A=None, B=None, C=None, D=None,
alpha=1.0, beta=0.0, element_accumulator=None,
element=None, layout=None,
element_A=None, element_B=None, element_C=None, element_D=None,
layout_A=None, layout_B=None, layout_C=None,
cc: int = None,
):
super().__init__(
A=A, B=B, C=C, D=D,
alpha=alpha, beta=beta,
element_accumulator=element_accumulator,
element=element, layout=layout,
element_A=element_A, element_B=element_B,
element_C=element_C, element_D=element_D,
layout_A=layout_A, layout_B=layout_B, layout_C=layout_C,
cc=cc
)
# Grouped GEMM specializations for SM90 are currently unavailable. Revert to using SM80
if self.current_cc == 90:
self._reset_options(80)
self._reset_operations(reset_epilogue=False)
self.name = "grouped_gemm"
@Gemm.swizzling_functor.setter
def swizzling_functor(self, swizzling_functor):
"""
Sets the swizzling functor to the type specified by `swizzling_functor`
"""
raise Exception('Grouped GEMM does not currently support different swizzling functors')
def construct(self, tile_description: TileDescription = None,
alignment_A: int = None,
alignment_B: int = None,
alignment_C: int = None) -> GemmOperationGrouped:
"""
Constructs a ``cutlass.backend.GemmOperationGrouped`` based on the input parameters and current
kernel specification of the ``Gemm`` object.
:param tile_description: tile description specifying shapes and operand types to use in the kernel
:type tile_description: cutlass.backend.TileDescription
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:param alignment_C: alignment of operand C
:type alignment_C: int
:return: operation that was constructed
:rtype: cutlass.backend.GemmOperationGrouped
"""
alignment_A = check.alignment_or_default(alignment_A, max(self.possible_operations.alignments("A")))
alignment_B = check.alignment_or_default(alignment_B, max(self.possible_operations.alignments("B")))
alignment_C = check.alignment_or_default(alignment_C, max(self.possible_operations.alignments("C")))
self.epilogue_functor = self._reset_epilogue_functor_alignment(alignment_C, self.epilogue_functor)
tensor_A = TensorDescription(self._element_a, self._layout_a, alignment_A)
tensor_B = TensorDescription(self._element_b, self._layout_b, alignment_B)
tensor_C = TensorDescription(self._element_c, self._layout_c, alignment_C)
if tile_description is None:
op = self.possible_operations.operations(alignment_A, alignment_B, alignment_C, self._math_operation)[0]
tile_description = datatypes.td_from_profiler_op(op)
else:
valid, err_str = self._valid_tile_description(tile_description)
if not valid:
raise Exception(f"Invalid tile description. {err_str}")
self.tile_description = tile_description
operation = GemmOperationGrouped(
arch=self.current_cc,
tile_description=tile_description,
A=tensor_A, B=tensor_B, C=tensor_C,
epilogue_functor=self.epilogue_functor,
swizzling_functor=self._swizzling_functor,
precompute_mode=SchedulerMode.Device)
return operation
def run(self, A, B, C, D,
alpha=None, beta=None, sync: bool = True,
print_module: bool = False,
stream: cuda.CUstream = cuda.CUstream(0)) -> GemmGroupedArguments:
"""
Runs the kernel currently specified.
By default, this call returns only once the kernel has completed. To launch the kernel
and immediately return, set ``sync=False``. In this case, it is the responsibility of the
caller to synchronize the results of the kernel before attempting to access outputs
by calling ``sync()`` on the arguments returned from this call.
:param A: list of tensors representing data type and layout of operand A
:type A: list
:param B: list of tensors representing data type and layout of operand B
:type B: list
:param C: list of tensors representing data type and layout of operand C
:type C: list
:param D: list of tensors representing data type and layout of operand D
:type D: list
:param alpha: scalar parameter alpha from GEMM computation that scales the product of operands A and B
:param beta: scalar parameter beta from GEMM operation that scales operand C
:param sync: whether the call should wait for the kernel to complete before returning
:type sync: bool
:param print_module: whether to print the emitted C++ code
:type print_module: bool
:param stream: cuda stream, defaults to cuda.cuda.CUstream(0)
:type stream: :class:`cuda.cuda.CUstream`
:return: arguments passed in to the kernel
:rtype: cutlass.backend.GemmGroupedArguments
"""
super().run_setup()
if len(A) != len(B) or len(A) != len(C) or len(A) != len(D):
raise Exception("Lengths of A, B, C, and D lists must be equal")
problem_sizes = []
As, Bs, Cs, Ds = ([None] * len(A) for _ in range(4))
for i in range(len(A)):
As[i] = self._verify_tensor(A[i], self.A, self._element_a, self._layout_a, "A")
Bs[i] = self._verify_tensor(B[i], self.B, self._element_b, self._layout_b, "B")
Cs[i] = self._verify_tensor(C[i], self.C, self._element_c, self._layout_c, "C")
Ds[i] = self._verify_tensor(D[i], self.D, self._element_d, self._layout_d, "D")
problem_sizes.append(GemmCoord(A[i].shape[0], B[i].shape[1], A[i].shape[1]))
alpha = self._verify_scalar(alpha, self.alpha, self._element_c, "alpha")
beta = self._verify_scalar(beta, self.beta, self._element_c, "beta")
alignment_a = min((self.possible_operations.find_alignment(A.shape, self._layout_a, operand="A") for A in As))
alignment_b = min((self.possible_operations.find_alignment(B.shape, self._layout_b, operand="B") for B in Bs))
alignment_c = min((self.possible_operations.find_alignment(C.shape, self._layout_c, operand="C") for C in Cs))
self.compile(self.tile_description, alignment_A=alignment_a, alignment_B=alignment_b,
alignment_C=alignment_c, print_module=print_module)
arguments = GemmGroupedArguments(
operation=self.operation,
problem_sizes=problem_sizes,
A=As, B=Bs, C=Cs, D=Ds,
output_op=self.operation.epilogue_type(alpha, beta),
stream=stream
)
self.operation.run(arguments)
if sync:
arguments.sync()
return arguments
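# Illustrative usage sketch (comments only; not executed as part of this module).
# It assumes the public ``cutlass.op.GroupedGemm`` wrapper that exposes this ``run``
# method and NumPy host tensors; the shapes below are hypothetical.
#
#   import numpy as np
#   import cutlass
#
#   plan = cutlass.op.GroupedGemm(element=np.float16, layout=cutlass.LayoutType.RowMajor)
#   As = [np.ones((128, 64), np.float16), np.ones((256, 32), np.float16)]
#   Bs = [np.ones((64, 96), np.float16), np.ones((32, 16), np.float16)]
#   Cs = [np.zeros((128, 96), np.float16), np.zeros((256, 16), np.float16)]
#   Ds = [np.zeros_like(c) for c in Cs]
#
#   args = plan.run(As, Bs, Cs, Ds, sync=False)  # returns GemmGroupedArguments
#   args.sync()                                  # caller synchronizes when sync=False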
| python/cutlass/op/gemm_grouped.py/0 | {
"file_path": "python/cutlass/op/gemm_grouped.py",
"repo_id": "python",
"token_count": 4658
} | 40 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for emitting Rank2K kernels
"""
import enum
import functools
import operator
import os.path
import shutil
try:
import builtins
if hasattr(builtins, "CUTLASS_IGNORE_PACKAGE") and CUTLASS_IGNORE_PACKAGE == True:
raise ImportError("Disabling attempt to import cutlass_library")
from cutlass_library.library import *
except ImportError:
from library import *
###################################################################################################
#
# Data structure modeling a Rank K update operation
#
###################################################################################################
#
class Rank2KOperation:
#
def __init__(self, rank_k_kind, arch, tile_description, A, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \
blas_mode = BlasMode.symmetric):
self.blas_mode = blas_mode
self.operation_kind = OperationKind.Rank2K
self.arch = arch
self.tile_description = tile_description
self.rank_k_kind = rank_k_kind
# tensor A and B have same data type and layout
self.A = A
self.B = A
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_mixed_input(self):
return self.A.element != self.B.element
#
def is_planar_complex(self):
return False
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {
MathOperation.xor_popc: 'xor',
MathOperation.and_popc: 'and'
}
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp:
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ''
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
inst_shape += math_op_string
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
operation_name = 'syr2k' if self.blas_mode == BlasMode.symmetric else 'her2k'
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, operation_name)
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.is_complex():
extended_name = "${core_name}"
else:
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)]
)
return "%s" % (ShortLayoutTypeNames[self.A.layout])
#
def fill_mode_name(self):
return "%s" % (ShortFillModeNames[self.C.fill_mode])
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
alignment = max([self.A.alignment, self.C.alignment])
return SubstituteTemplate(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${fill_mode}_align${alignment}",
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'fill_mode': self.fill_mode_name(),
'alignment': "%d" % self.A.alignment,
}
)
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
return self.procedural_name()
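# Illustrative output (hypothetical values, for orientation only): for a tensor-op
# SYR2K instance the template above expands to a name of the form
#
#   cutlass_tensorop_s1688syr2k_256x128_16x3_n_l_align4
#
# i.e. opcode class, extended name (short math name + instruction shape + syr2k/her2k),
# threadblock tile and stage count, layout, fill mode, and alignment.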
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
#
class EmitRank2KUniversalInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self):
self.rank_k_template = """
// Rank K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Rank2K<
${element_a}, ${layout_a},
${element_b}, ${layout_b},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation}
>;
"""
self.rank_k_complex_template = """
// Rank K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Rank2K<
${element_a}, ${layout_a},
${element_b}, ${layout_b},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation},
${transform_a},
${transform_b},
${blas_mode}
>;
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
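# For example, an f16 output (16-bit elements) with C alignment 8 yields
# min(8 * 16, 128) / 16 = 8 elements, i.e. the epilogue vector is capped at a
# single 128-bit access.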
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'fill_mode': FillModeTag[operation.C.fill_mode],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
'split_k_serial': 'false',
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'transform_b': ComplexTransformTag[operation.B.complex_transform],
'blas_mode': BlasModeTag[operation.blas_mode]
}
rank_k_template = self.rank_k_complex_template if operation.is_complex() else self.rank_k_template
return SubstituteTemplate(rank_k_template, values)
###################################################################################################
###################################################################################################
#
# Emitter functions for all targets
#
###################################################################################################
class EmitRank2KConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/')
self.instance_emitter = {
RankKKind.Universal: EmitRank2KUniversalInstance,
}
self.rank_k_kind_wrappers = {
RankKKind.Universal: 'Rank2KOperation',
}
# Compile guard wrapped around WMMA kernels; referenced by emit() below.
self.wmma_guard_start = "#if defined(CUTLASS_ARCH_WMMA_SM${sm_number}_ENABLED)"
self.instance_template = {
RankKKind.Universal: """
${compile_guard_start}
manifest.append(new ${rank_k_kind}<
Operation_${operation_name}
>("${operation_name}"));
${compile_guard_end}
"""
}
self.header_template = """
/*
Generated by rank_2k_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "rank_2k_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.initialize_function_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.epilogue_template = """
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(self.header_template)
self.instance_definitions = []
self.instance_wrappers = []
self.operations = []
return self
def emit(self, operation):
emitter = self.instance_emitter[operation.rank_k_kind]()
self.operations.append(operation)
self.instance_definitions.append(emitter.emit(operation))
self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.rank_k_kind], {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'rank_k_kind': self.rank_k_kind_wrappers[operation.rank_k_kind],
'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "",
'compile_guard_end': "#endif" \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else ""
}))
def __exit__(self, exception_type, exception_value, traceback):
# Write instance definitions in top-level namespace
for instance_definition in self.instance_definitions:
self.configuration_file.write(instance_definition)
# Add wrapper objects within initialize() function
self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, {
'configuration_name': self.configuration_name
}))
for instance_wrapper in self.instance_wrappers:
self.configuration_file.write(instance_wrapper)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
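# Typical use of the emitter (sketch, mirroring the context-manager protocol above;
# construction of the Rank2KOperation instances is omitted):
#
#   with EmitRank2KConfigurationLibrary(operation_path, configuration_name) as emitter:
#     for operation in operations:
#       emitter.emit(operation)
#
# On exit, the generated instance definitions and the initialize_${configuration_name}()
# registration function are written to ${configuration_name}.cu.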
###################################################################################################
| python/cutlass_library/rank_2k_operation.py/0 | {
"file_path": "python/cutlass_library/rank_2k_operation.py",
"repo_id": "python",
"token_count": 5451
} | 41 |
{
"path": "./../../../../examples/python/01_epilogue.ipynb"
}
| python/docs_src/source/externals/01_epilogue.nblink/0 | {
"file_path": "python/docs_src/source/externals/01_epilogue.nblink",
"repo_id": "python",
"token_count": 30
} | 42 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Low-level functionality tests for Conv2d operations on SM80
"""
import logging
import unittest
import cutlass
from cutlass.backend.utils.device import device_cc
from conv2d_test_utils import *
cutlass.set_log_level(logging.WARNING)
cc = 80
@unittest.skipIf(device_cc() < cc, 'Device compute capability is invalid for SM80 tests.')
class Conv2dSm80(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
conv_problems = get_conv_problems()
# Tests for optimized & analytic
for conv_kind in ["fprop", "wgrad", "dgrad"]:
# F16, simt
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="simt", threadblock_shape=[128, 128, 8],
warp_count=[4, 2, 1], stages=2, instruction_shape=[1, 1, 1])
# F16, tensor op
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16])
# F16, tensor op, analytic iterator
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16], iterator_algorithm="analytic")
# F16, tensor op, f32 output
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f32,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16])
# F16, tensor op, different tile description
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 64, 32],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 8])
# F32, simt
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32,
opclass="simt", threadblock_shape=[128, 128, 8],
warp_count=[4, 2, 1], stages=4, instruction_shape=[1, 1, 1])
# Tf32, tensorop
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32,
opclass="tensor_op", threadblock_shape=[128, 128, 16],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 8]
)
# Split-K
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16], split_k_mode="serial",
split_k_slices=2)
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16], split_k_mode="parallel",
split_k_slices=5)
# Swizzling functor
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 64, 32],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 8], swizzle=4)
# Tests for few channels and fixed channels
# F16, tensor op, few channels
for c, tb, stage, inst in zip([2, 1],
[[128, 128, 64], [128, 128, 32]],
[3, 2],
[[16, 8, 16], [16, 8, 8]]):
add_test(
Conv2dSm80, cc, "fprop", conv2d_few_channel_problemsizes(c), cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=tb,
warp_count=[2, 2, 1], stages=stage, instruction_shape=inst, iterator_algorithm="few_channels"
)
# F16, tensor op, fixed channels
for c in [8, 4, 2]:
add_test(
Conv2dSm80, cc, "fprop", conv2d_few_channel_problemsizes(c), cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16], iterator_algorithm="fixed_channels"
)
# Test activations
for activation in ["relu", "leaky_relu"]:
for split_k_mode, split_k_slices in zip(["parallel", "serial", "parallel"], [1, 7, 5]):
add_test(
Conv2dSm80, cc, "fprop", conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16], split_k_mode=split_k_mode,
split_k_slices=split_k_slices, activation=activation)
if __name__ == '__main__':
unittest.main()
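# The cases above are attached to Conv2dSm80 dynamically by add_test(), so they can be
# run directly or through unittest discovery, e.g. (assuming an SM80-class GPU and an
# installed cutlass Python package):
#
#   python conv2d_sm80.py
#   python -m unittest conv2d_sm80.Conv2dSm80 -v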
| test/python/cutlass/conv2d/conv2d_sm80.py/0 | {
"file_path": "test/python/cutlass/conv2d/conv2d_sm80.py",
"repo_id": "test",
"token_count": 2751
} | 43 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implicit GEMM for fused epilogue broadcast testbed
Parallel split-k is not tested because the regular conv kernel can be used
when parallel split-k is needed; the broadcast can then happen in the
reduction kernel.
*/
#pragma once
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/reduction/device/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "conv2d_problems.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/core_io.h"
#include "cutlass/util/tensor_view_io.h"
#include "../cache_testbed_output.h"
namespace test {
namespace conv {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Conv2d>
struct Conv2dWithBroadcastReferenceOp {
using OutputOp = typename Conv2d::EpilogueOutputOp;
using ElementCompute = typename OutputOp::ElementCompute;
using ElementZ = typename OutputOp::ElementZ;
using ElementT = typename OutputOp::ElementT;
typename OutputOp::BinaryOp binary_op;
typename OutputOp::ElementwiseOp elementwise_op;
Conv2dWithBroadcastReferenceOp() { }
void operator()(ElementZ &Z, ElementT &T, ElementCompute conv2d, ElementCompute bias) {
ElementCompute t_full = binary_op(conv2d, bias);
T = ElementT(t_full);
ElementCompute z_full = elementwise_op(t_full);
Z = ElementZ(z_full);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Fused testbed
//
// Y[n, p, q, k] = alpha * conv(A, B)[n, p, q, k] + beta * C[n, p, q, k]
//
// T[n, p, q, k] = ReductionOp(Y[n, p, q, k], Broadcast[k])
//
// Z[n, p, q, k] = Elementwise(T[n, p, q, k])
//
template <
typename Conv2d,
typename ReferenceOp,
bool AddBroadcastFirst = false
>
class TestbedConv2dWithBroadcast {
public:
using ElementA = typename Conv2d::ElementA;
using LayoutA = typename Conv2d::LayoutA;
using ElementB = typename Conv2d::ElementB;
using LayoutB = typename Conv2d::LayoutB;
using ElementC = typename Conv2d::ElementC;
using LayoutC = typename Conv2d::LayoutC;
using ElementAccumulator = typename Conv2d::ElementAccumulator;
using ElementCompute = typename Conv2d::ElementCompute;
using EpilogueOutputOp = typename Conv2d::EpilogueOutputOp;
using ElementZ = typename EpilogueOutputOp::ElementZ;
using ElementT = typename EpilogueOutputOp::ElementT;
using ElementVector = typename EpilogueOutputOp::ElementVector;
static cutlass::conv::Operator const kConvolutionalOperator = Conv2d::kConvolutionalOperator;
static const bool kAddBroadcastFirst = AddBroadcastFirst;
static const bool kStoreT = EpilogueOutputOp::kStoreT;
public:
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementAccumulator, LayoutC> tensor_C_reference;
cutlass::HostTensor<ElementZ, LayoutC> tensor_Z_computed;
cutlass::HostTensor<ElementZ, LayoutC> tensor_Z_reference;
cutlass::HostTensor<ElementT, LayoutC> tensor_T_computed;
cutlass::HostTensor<ElementT, LayoutC> tensor_T_reference;
cutlass::HostTensor<ElementAccumulator, LayoutC> tensor_Y_reference;
cutlass::HostTensor<ElementVector, LayoutC> tensor_Broadcast; // Input Broadcast
public:
TestbedConv2dWithBroadcast(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) {
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
void initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
int scope;
int bits = cutlass::sizeof_bits<Element>::value;
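// Narrow the random value range for low-precision element types so that the host
// reference and the device kernel accumulate without rounding differences and the
// outputs can be compared for exact equality.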
if (bits <= 8) {
scope = 2;
}
else if (bits == 16) {
if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) {
scope = 3;
}
else {
scope = 5;
}
}
else {
scope = 8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope, -scope, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(view.data(), view.capacity());
}
else {
}
}
void initialize(
cutlass::conv::Conv2dProblemSize const &problem_size, uint64_t seed = 2019) {
tensor_A.resize(implicit_gemm_tensor_a_extent(kConvolutionalOperator, problem_size));
tensor_B.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size));
tensor_C.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_C_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_Z_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_Z_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_T_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_T_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_Y_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_Broadcast.resize({
1,
1,
1,
implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size).c(),
});
initialize_tensor(tensor_A.host_view(), init_A, seed);
initialize_tensor(tensor_B.host_view(), init_B, seed * 17);
initialize_tensor(tensor_C.host_view(), init_C, seed * 39);
initialize_tensor(tensor_Broadcast.host_view(), init_C, seed * 39);
for (int n = 0; n < tensor_C_reference.extent().n(); ++n) {
for (int p = 0; p < tensor_C_reference.extent().h(); ++p) {
for (int q = 0; q < tensor_C_reference.extent().w(); ++q) {
for (int k = 0; k < tensor_C_reference.extent().c(); ++k) {
tensor_C_reference.at({n, p, q, k}) = ElementAccumulator(tensor_C.at({n, p, q, k}));
}
}
}
}
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_Broadcast.sync_device();
tensor_C_reference.sync_device();
tensor_Z_computed.sync_device();
tensor_Z_reference.sync_device();
tensor_T_computed.sync_device();
tensor_T_reference.sync_device();
tensor_Y_reference.sync_device();
}
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Conv2d::UnderlyingKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::conv::Conv2dProblemSize const &problem_size,
cutlass::conv::SplitKMode const &split_k_mode = cutlass::conv::SplitKMode::kSerial,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(1)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
#if 0 //display conv2d problem size for debugging
std::cout << problem_size << std::endl
<< "alpha, beta: (" << alpha << ", " << beta << ")" << std::endl
<< "split_k_mode: " << ((split_k_mode == cutlass::conv::SplitKMode::kSerial) ? "(serial)" : "(parallel)") << std::endl
<< std::endl;
#endif
initialize(problem_size);
// configure the operator
Conv2d conv2d_op;
typename Conv2d::Arguments conv2d_args(
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_Z_computed.device_ref(),
{alpha, beta},
split_k_mode,
tensor_Broadcast.device_data(),
kStoreT ? tensor_T_computed.device_data() : nullptr,
0, // This must be zero
implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size).c()
);
// initialize the kernel
size_t workspace_size = Conv2d::get_workspace_size(conv2d_args);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = conv2d_op.initialize(conv2d_args, workspace.get());
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return true;
}
// run conv2d operator
status = conv2d_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
bool passed = false;
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << " device reference error: "
<< cudaGetErrorString(result);
tensor_T_computed.sync_host();
tensor_Z_computed.sync_host();
//
// Reference check
//
// When kAddBroadcastFirst is true, add bias on the host
ElementCompute beta_ref = kAddBroadcastFirst ? ElementCompute(0) : beta;
#if CUTLASS_CONV_TEST_UNIT_REFERENCE_DEVICE_ENABLED
cutlass::reference::device::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementAccumulator,
LayoutC,
ElementAccumulator,
ElementAccumulator
>(
kConvolutionalOperator,
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C_reference.device_ref(),
tensor_Y_reference.device_ref(),
alpha,
beta_ref);
// sync host (copy device data to host) for dumping error output in case of mismatches
tensor_Y_reference.sync_host();
#else
cutlass::reference::host::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementAccumulator,
LayoutC,
ElementAccumulator,
ElementAccumulator
>(
kConvolutionalOperator,
problem_size,
tensor_A.host_ref(),
tensor_B.host_ref(),
tensor_C_reference.host_ref(),
tensor_Y_reference.host_ref(),
alpha,
beta_ref);
#endif
ReferenceOp reference_op;
// compute tensor Z and tensor T
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < (kConvolutionalOperator == cutlass::conv::Operator::kFprop ? problem_size.P : problem_size.H); ++p) {
for (int q = 0; q < (kConvolutionalOperator == cutlass::conv::Operator::kFprop ? problem_size.Q : problem_size.W); ++q) {
for (int k = 0; k < (kConvolutionalOperator == cutlass::conv::Operator::kFprop ? problem_size.K : problem_size.C); ++k) {
ElementZ z{};
ElementT t{};
ElementCompute accum = tensor_Y_reference.at({n, p, q, k});
ElementCompute bias = ElementCompute(tensor_Broadcast.at({0, 0, 0, k}));
if (kAddBroadcastFirst) {
reference_op(z, t, accum + bias,
beta * ElementCompute(tensor_C_reference.at({n, p, q, k})));
} else {
reference_op(z, t, accum, bias);
}
tensor_Z_reference.at({n, p, q, k}) = z;
tensor_T_reference.at({n, p, q, k}) = t;
}
}
}
}
if (kStoreT) {
passed = cutlass::reference::host::TensorEquals(
tensor_T_computed.host_view(),
tensor_T_reference.host_view());
EXPECT_TRUE(passed);
}
passed = cutlass::reference::host::TensorEquals(
tensor_Z_computed.host_view(),
tensor_Z_reference.host_view());
EXPECT_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_Conv2d_ImplicitGemm_device_"
<< (split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial_reduction_" : "parallel_reduction_")
<< (Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kFprop ? "fprop_" :
(Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ? "dgrad_" :
(Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kDeconv ? "deconv_" : "wgrad_")))
<< "nhwc_"
<< problem_size.N << "x"
<< problem_size.H << "x"
<< problem_size.W << "x"
<< problem_size.C
<< "_krsc_"
<< problem_size.K << "x"
<< problem_size.R << "x"
<< problem_size.S << "x"
<< problem_size.C
<< "_padding_"
<< problem_size.pad_h << "x"
<< problem_size.pad_w
<< "_stride_"
<< problem_size.stride_h << "x"
<< problem_size.stride_w
<< "_dilation_"
<< problem_size.dilation_h << "x"
<< problem_size.dilation_w << "_"
<< (problem_size.mode == cutlass::conv::Mode::kCrossCorrelation ? "xcorr_" : "conv_")
<< Conv2d::ThreadblockShape::kM << "x"
<< Conv2d::ThreadblockShape::kN << "x"
<< Conv2d::ThreadblockShape::kK << "_"
<< Conv2d::WarpShape::kM << "x"
<< Conv2d::WarpShape::kN << "x"
<< Conv2d::WarpShape::kK << ".txt";
std::cout << fname.str() << std::endl;
std::ofstream results(fname.str());
results << problem_size << std::endl;
results
<< "\nA:\n" << tensor_A.host_view() << "\n"
<< "\nB:\n" << tensor_B.host_view() << "\n"
<< "\nC:\n" << tensor_C.host_view() << "\n"
<< "\nBroadcast:\n" << tensor_Broadcast.host_view() << "\n"
<< "\nY reference:\n" << tensor_Y_reference.host_view() << "\n"
<< "\nT reference:\n" << tensor_T_reference.host_view() << "\n"
<< "\nT computed:\n" << tensor_T_computed.host_view() << "\n"
<< "\nZ reference:\n" << tensor_Z_reference.host_view() << "\n"
<< "\nZ computed:\n" << tensor_Z_computed.host_view() << "\n";
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename ImplicitGemm,
typename ReferenceOp = Conv2dWithBroadcastReferenceOp<ImplicitGemm>,
bool AddBroadcastFirst = false>
bool TestSpecificConv2dWithBroadcast(
const Conv2dProblemVector & problem_sizes) {
bool passed = true;
//
// Testbed object
//
TestbedConv2dWithBroadcast<ImplicitGemm, ReferenceOp, AddBroadcastFirst> testbed;
// Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slice=1, alpha=1.0, beta=0.0)
for(auto conv_problem : problem_sizes) {
//
// Test
//
// test mode = xcross
passed = testbed.run(
conv_problem,
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
// test mode = convolution
passed = testbed.run(
conv_problem.reset_mode(cutlass::conv::Mode::kConvolution),
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
// TestAllConv: Runs cutlass::conv::device::ImplicitGemmConvolution operator and compares it with reference
// TestAllConv runs the conv operator on default conv problem sizes from test::conv::device::TestbedConv2dProblemSizes.
// Additionally, each conv2d test can provide its own problem sizes (conv_test_sizes) and a blacklist of sizes
// (conv_blacklist_sizes)
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename ImplicitGemm,
typename ReferenceOp = Conv2dWithBroadcastReferenceOp<ImplicitGemm>,
bool AddBroadcastFirst = false,
bool TestSplitK = true
>
bool TestAllConv2dWithBroadcast(
const Conv2dProblemVector &conv_test_sizes = Conv2dProblemVector(),
const Conv2dProblemVector &conv_blacklist_sizes = Conv2dProblemVector()) {
bool passed = true;
//
// Testbed object
//
TestbedConv2dWithBroadcast<ImplicitGemm, ReferenceOp, AddBroadcastFirst> testbed;
//
// Get conv problem sizes to run conv operator
//
TestbedConv2dProblemSizes conv_problems(128/cutlass::sizeof_bits<typename ImplicitGemm::ElementA>::value);
// Vector of conv2d problem sizes to avoid duplicate runs
Conv2dProblemVector conv_tested_sizes;
Conv2dProblemVector const *problem_vectors[] = {
&conv_test_sizes, // run user specified sizes
&conv_problems.conv2d_default_sizes, // run default and cudnn bug sizes
&conv_problems.conv2d_resnet50_sizes, // run resnet50 sizes
#if CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED
&conv_problems.conv2d_rigorous_sizes, // run large and rigorous sizes if enabled
#endif
};
// Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slice=1, alpha=1.0, beta=0.0)
for (Conv2dProblemVector const * problem_vector : problem_vectors) {
// Run conv testbed on default convolution sizes
for(auto conv_problem : *problem_vector) {
// Skip blacklist and avoid duplicate problem sizes
if (std::find(conv_blacklist_sizes.begin(), conv_blacklist_sizes.end(), conv_problem) != conv_blacklist_sizes.end() ||
std::find(conv_tested_sizes.begin(), conv_tested_sizes.end(), conv_problem) != conv_tested_sizes.end()) {
continue;
}
//
// Procedurally disable certain cases
//
// CUTLASS DGRAD's *unity* stride specialization only supports stride {1, 1}
if ((ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ||
ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDeconv) &&
(ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
cutlass::conv::StrideSupport::kUnity)) {
if (!((conv_problem.stride_h == 1) && (conv_problem.stride_w == 1))) {
continue;
}
}
#if 0 // relax restrictions on analytic strided dgrad
// CUTLASS DGRAD's *strided* specialization only supports stride >= {2, 2}
if ((ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ||
ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDeconv) &&
(ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
cutlass::conv::StrideSupport::kStrided)) {
if (((conv_problem.stride_h == 1) && (conv_problem.stride_w == 1))) {
continue;
}
}
#endif
//
// Test
//
// push back tested problem size to avoid re-running duplicates
conv_tested_sizes.push_back(conv_problem);
// test mode = xcross
passed = testbed.run(
conv_problem,
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
// test mode = convolution
passed = testbed.run(
conv_problem.reset_mode(cutlass::conv::Mode::kConvolution),
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
}
}
// CUTLASS DGRAD's *strided* specialization does not support split-k mode
if ((ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ||
ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDeconv) &&
(ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
cutlass::conv::StrideSupport::kStrided)) {
passed = testbed.run(
cutlass::conv::Conv2dProblemSize(
{1, 56, 56, 8}, // input size (NHWC)
{8, 1, 1, 8}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1}), // dilation (dilation_h, dilation_w)
cutlass::conv::SplitKMode::kSerial,
cutlass::from_real<typename ImplicitGemm::ElementCompute>(2.0),
cutlass::from_real<typename ImplicitGemm::ElementCompute>(2.0));
if (!passed) {
return false;
}
return passed;
}
if (!TestSplitK)
return passed;
// Sweep split-k-slice using serial and parallel reduction with non-unity alpha and non-zero beta for
// a single conv2d problem size. Convolution unit tests take a long time to run, so only sweep parameters
// which are absolutely necessary to catch functional bugs. The code below does provide the option to sweep
// alpha and beta for local testing, but only runs one value for alpha and beta.
cutlass::conv::Conv2dProblemSize conv2d_split_k_test_size (
{1, 17, 11, 288}, // input size (NHWC)
{160, 3, 3, 288}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
);
cutlass::conv::SplitKMode split_k_modes [] = {
cutlass::conv::SplitKMode::kSerial
};
int split_k_slices[] = {
1, 2, 3, 4, 201
};
double problem_alpha[] = {
2.0
};
double problem_beta[] = {
2.0
};
for (auto split_k_mode : split_k_modes) {
for (auto split_k_slice : split_k_slices) {
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
passed = testbed.run(
conv2d_split_k_test_size.reset_split_k_slices(split_k_slice),
split_k_mode,
cutlass::from_real<typename ImplicitGemm::ElementCompute>(alpha),
cutlass::from_real<typename ImplicitGemm::ElementCompute>(beta));
if (!passed) {
return false;
}
}
}
}
}
return passed;
}
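// Illustrative use from a unit test (sketch; the kernel alias and test name are
// hypothetical and abbreviated -- a real instantiation supplies the full
// ImplicitGemmConvolution template arguments):
//
//   TEST(SM80_Device_Conv2d_Fprop_With_Broadcast_Example, 128x128_32x3) {
//     using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
//     EXPECT_TRUE(test::conv::device::TestAllConv2dWithBroadcast<ImplicitGemm>());
//   }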
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace conv
} // namespace test
| test/unit/conv/device/conv2d_with_broadcast_testbed.h/0 | {
"file_path": "test/unit/conv/device/conv2d_with_broadcast_testbed.h",
"repo_id": "test",
"token_count": 10107
} | 44 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implicit GEMM testbed for 3.x API
*/
#pragma once
#include "cutlass/cutlass.h"
#include "../../common/cutlass_unit_test.h"
#include "cute/tensor.hpp"
#include "cutlass/kernel_hardware_info.hpp"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/convnd_problem_shape.hpp"
#include "../test/unit/gemm/device/gemm_testbed_3x.hpp"
#include "thrust/universal_vector.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/packed_stride.hpp"
#include "cutlass/util/reference/host/conv.hpp"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "conv_problem_sizes.hpp"
#include "../cache_testbed_output.h"
#include <iostream>
#include "cute/layout.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test::conv::device {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Initializes a flat device buffer
template <typename Element>
static void
initialize_values(
thrust::universal_vector<Element>& dst_ptr,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (cutlass::Distribution::Uniform == dist_kind) {
int scope;
int bits = cutlass::sizeof_bits<Element>::value;
if (bits <= 8) {
scope = 2;
}
else if (bits == 16) {
scope = 4;
}
else {
scope = 8;
}
cutlass::reference::host::BlockFillRandomUniform(
dst_ptr.data().get(), dst_ptr.size(), seed, scope, -scope, 0);
}
else if (cutlass::Distribution::Identity == dist_kind) {
cutlass::reference::host::BlockFillRandomUniform(
dst_ptr.data().get(), dst_ptr.size(), seed, 0, 0, 0);
}
else if (cutlass::Distribution::Gaussian == dist_kind) {
cutlass::reference::host::BlockFillRandomGaussian(dst_ptr.data().get(), dst_ptr.size(), seed, 0, 0.5);
}
else if (cutlass::Distribution::Sequential == dist_kind) {
cutlass::reference::host::BlockFillSequential(dst_ptr.data().get(), dst_ptr.size());
}
else {
std::cerr << "Invalid distribution kind!\n.";
exit(1);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// utils for sparse or dense conv parameters
template <class Conv>
struct DenseConvParams {
// Default Kernel data types
using ElementA = typename Conv::ConvKernel::ElementA;
using ElementB = typename Conv::ConvKernel::ElementB;
static constexpr cutlass::conv::Operator ConvOp = Conv::DispatchPolicy::ConvOp;
static constexpr int NumSpatialDimensions = Conv::NumSpatialDimensions;
using ProblemShape = cutlass::conv::ConvProblemShape<ConvOp, NumSpatialDimensions>;
// get the default arguments without sparse data
auto get_mainloop_arguments(
ProblemShape const& problem_shape,
thrust::universal_vector<ElementA>& tensor_A,
thrust::universal_vector<ElementB>& tensor_B
) {
auto args = typename Conv::ConvKernel::MainloopArguments {
problem_shape,
tensor_A.data().get(),
tensor_B.data().get(),
};
return args;
}
};
template <class Conv>
struct SparseConvParams {
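// Sparse specializations are expected to mirror DenseConvParams: an initialize()
// that prepares kernel-specific sparse metadata and a get_mainloop_arguments() that
// additionally passes that metadata (see their uses in ConvTestbed below).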
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <class Conv, bool isSparseEnabled_ = false>
struct ConvTestbed {
// Kernel data types
using ElementA = typename Conv::ConvKernel::ElementA;
using ElementB = typename Conv::ConvKernel::ElementB;
using ElementC = cute::conditional_t<cute::is_void_v<typename Conv::ConvKernel::ElementC>,
typename Conv::ConvKernel::ElementD, typename Conv::ConvKernel::ElementC>;
using ElementD = typename Conv::ConvKernel::ElementD;
using ElementAccumulator = typename Conv::ConvKernel::ElementAccumulator;
// ConvTest for sparse kernel
static constexpr bool isSparseEnabled = isSparseEnabled_;
using ConvParams = cute::conditional_t<isSparseEnabled, SparseConvParams<Conv>, DenseConvParams<Conv>>;
ConvParams params;
//
// FusionOperation derived types/queries
//
using FusionOp = typename Conv::EpilogueOutputOp;
// fusion types are potentially void if the fusion is not supported
// helper so we don't try to construct HostTensor with void type
template <typename T, typename U = uint8_t>
using non_void_t = cute::conditional_t<cute::is_void_v<T>, U, T>;
using ElementScalar = typename FusionOp::ElementScalar;
using ElementCompute = typename FusionOp::ElementCompute;
using BiasType = typename cutlass::epilogue::collective::detail::IsThreadEpilogueOpWithBias<FusionOp>::type;
using ElementBias = non_void_t<BiasType>;
using ActivationType = non_void_t<typename cutlass::epilogue::collective::detail::IsThreadEpilogueOpWithActivation<FusionOp>::type,
cutlass::epilogue::thread::Identity<ElementCompute>>;
static constexpr bool IsActivationEnabled = cutlass::epilogue::collective::detail::IsThreadEpilogueOpWithActivation<FusionOp>::value;
using ActivationFunctor = cute::conditional_t<IsActivationEnabled, ActivationType, cutlass::epilogue::thread::Identity<ElementCompute>>;
static constexpr bool IsBiasEnabled = cutlass::epilogue::collective::detail::IsThreadEpilogueOpWithBias<FusionOp>::value &&
!cute::is_same_v<BiasType, void>;
static constexpr bool DisableSource = cute::is_void_v<typename FusionOp::ElementSource>;
using StrideC = typename Conv::ConvKernel::StrideC;
using StrideD = typename Conv::ConvKernel::StrideD;
using ThreadEpilogueOp = typename Conv::ConvKernel::CollectiveEpilogue::ThreadEpilogueOp;
static constexpr cutlass::conv::Operator ConvOp = Conv::DispatchPolicy::ConvOp;
static constexpr int NumSpatialDimensions = Conv::NumSpatialDimensions;
using ProblemShape = cutlass::conv::ConvProblemShape<ConvOp, NumSpatialDimensions>;
using RasterOrderOptions = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90::RasterOrderOptions;
using DecompositionMode = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90StreamKParams::DecompositionMode;
using MaxSwizzleSize = typename gemm::device::detail::MaxSwizzleSize;
using Splits = typename gemm::device::detail::Splits;
using Schedule = typename Conv::DispatchPolicy::Schedule;
/// Initialization
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform;
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform;
cutlass::Distribution::Kind init_C = cutlass::Distribution::Uniform;
cutlass::Distribution::Kind init_bias = cutlass::Distribution::Uniform;
cutlass::Distribution::Kind init_disable = cutlass::Distribution::Identity; // all zeros
uint64_t seed = 6090;
float epsilon = 0.0f;
int split_p_slices = 1;
thrust::universal_vector<ElementA> tensor_A;
thrust::universal_vector<ElementB> tensor_B;
thrust::universal_vector<ElementC> tensor_C;
thrust::universal_vector<ElementD> tensor_D_computed;
thrust::universal_vector<ElementD> tensor_D_reference;
thrust::universal_vector<ElementBias> tensor_bias;
thrust::universal_vector<ElementScalar> tensor_alpha;
thrust::universal_vector<ElementScalar> tensor_beta;
// Return true on success, else false
bool initialize(ProblemShape const& problem_shape, uint64_t seed = 6090) {
tensor_A.resize(sizeof(ElementA) * problem_shape.size_A());
tensor_B.resize(sizeof(ElementB) * problem_shape.size_B());
tensor_C.resize(sizeof(ElementC) * problem_shape.size_C());
tensor_D_computed.resize(sizeof(ElementD) * problem_shape.size_C());
tensor_D_reference.resize(sizeof(ElementD) * problem_shape.size_C());
tensor_bias.resize(sizeof(ElementBias) * cute::size(cute::get<0>(problem_shape.get_shape_B())));
initialize_values(tensor_A, init_A, seed);
initialize_values(tensor_B, init_B, seed * 11);
initialize_values(tensor_C, init_C, seed * 17);
initialize_values(tensor_bias, init_bias, seed * 19);
bool flag = true;
if constexpr (isSparseEnabled) {
flag &= params.initialize(problem_shape, tensor_B, static_cast<int>(seed + 2023));
}
return flag;
}
// Determine SMEM requirements and waive if not satisfied
bool sufficient() const {
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
int max_smem_size;
result = cudaDeviceGetAttribute(&max_smem_size, cudaDevAttrMaxSharedMemoryPerBlockOptin, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaDeviceGetAttribute() failed");
}
return max_smem_size >= Conv::ConvKernel::SharedStorageSize;
}
// Executes one test
bool run(
ProblemShape const& problem_shape,
ElementScalar alpha = ElementScalar(1),
ElementScalar beta = ElementScalar(0)
,
RasterOrderOptions raster_order = RasterOrderOptions::Heuristic,
MaxSwizzleSize max_swizzle = MaxSwizzleSize{},
Splits splits = Splits{},
DecompositionMode decomposition_mode = DecompositionMode::Heuristic
) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device.\n";
}
return true;
}
bool ret = initialize(problem_shape);
if (!ret) {
std::cerr << "initialize failed for the given problem_shape: \n";
return false;
}
cutlass::KernelHardwareInfo hw_info;
cudaGetDevice(&hw_info.device_id);
hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
// configure the operator
Conv conv_op;
auto stride_C = StrideC{};
auto stride_D = StrideD{};
if constexpr (ConvOp == cutlass::conv::Operator::kWgrad) {
stride_C = cutlass::make_cute_packed_stride(
StrideC{}, problem_shape.shape_C, problem_shape.stride_C, ConvOp);
stride_D = cutlass::make_cute_packed_stride(
StrideD{}, problem_shape.shape_C, problem_shape.stride_C, ConvOp);
}
// Fprop and dgrad kernels need to support non-packed output strides.
else {
cute::for_each(cute::make_seq<cute::rank<0>(StrideC{})>{}, [&](auto i) {
cute::get<0, i>(stride_C) = problem_shape.stride_C[ProblemShape::RankT-2-i];
});
cute::for_each(cute::make_seq<cute::rank<0>(StrideD{})>{}, [&](auto i) {
cute::get<0, i>(stride_D) = problem_shape.stride_C[ProblemShape::RankT-2-i];
});
}
using RasterOrderOptions = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90::RasterOrderOptions;
using DecompositionMode = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90StreamKParams::DecompositionMode;
typename Conv::ConvKernel::TileScheduler::Arguments scheduler_args{};
if constexpr (cute::is_same_v<typename Conv::ConvKernel::TileSchedulerTag, cutlass::gemm::StreamKScheduler>) {
scheduler_args = { static_cast<int>(splits), static_cast<int>(max_swizzle), raster_order, decomposition_mode };
}
auto mainloop_args = params.get_mainloop_arguments(problem_shape, tensor_A, tensor_B);
auto epilogue_args = typename Conv::ConvKernel::EpilogueArguments {
{},
tensor_C.data().get(),
stride_C,
tensor_D_computed.data().get(),
stride_D,
};
auto args = typename Conv::Arguments {
mainloop_args, // MainloopArguments
epilogue_args, // EpilogueArguments
hw_info,
scheduler_args
};
auto &fusion_args = args.epilogue.thread;
fusion_args.alpha = alpha;
fusion_args.beta = beta;
if constexpr (IsBiasEnabled) {
fusion_args.bias_ptr = tensor_bias.data().get();
}
// Clamp bound
if constexpr (cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::Clamp<ElementCompute>>) {
fusion_args.activation.lower_bound = CUTLASS_STL_NAMESPACE::numeric_limits<ElementCompute>::lowest();
fusion_args.activation.upper_bound = CUTLASS_STL_NAMESPACE::numeric_limits<ElementCompute>::max();
}
// Scale
if constexpr (cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::ScaledGELU_taylor<ElementCompute>> ||
cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::ScaledGELU<ElementCompute>>) {
fusion_args.activation.scale = ElementCompute{1};
}
cutlass::Status status = conv_op.can_implement(args);
EXPECT_EQ(status, cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
std::cerr << "can_implement failed for the given problem_shape: \n";
print(problem_shape);
return false;
}
// find workspace requirement for parallel split-k reduction
size_t workspace_size = Conv::get_workspace_size(args);
thrust::universal_vector<uint8_t> workspace(workspace_size);
status = conv_op.initialize(args, workspace.data().get());
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return true;
}
// run conv3d operator
status = conv_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
bool passed = false;
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << " Kernel execution error: "
<< cudaGetErrorString(result);
// Create cute::Tensors using the logical rank-3 MNK multi-mode shapes the mainloop gives us
auto shape_mA = cute::reverse(problem_shape.shape_A);
auto shape_mB = cute::reverse(problem_shape.shape_B);
auto shape_mC = cute::reverse(problem_shape.shape_C);
auto shape_mBias = cute::make_shape(cute::size(cute::get<0>(problem_shape.get_shape_B())));
auto stride_mA = cute::reverse(problem_shape.stride_A);
auto stride_mB = cute::reverse(problem_shape.stride_B);
auto stride_mC = cute::reverse(problem_shape.stride_C);
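// Note: cute::reverse flips the mode order here because ConvProblemShape stores its
// shape/stride arrays in the opposite order from the one make_layout expects for the
// host reference tensors built below.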
auto mA = make_tensor(tensor_A.data().get(), make_layout(shape_mA, stride_mA));
auto mB = make_tensor(tensor_B.data().get(), make_layout(shape_mB, stride_mB));
auto mC = make_tensor(tensor_C.data().get(), make_layout(shape_mC, stride_mC));
auto mD_ref = make_tensor(tensor_D_reference.data().get(), make_layout(shape_mC, stride_mC));
auto mD_computed = make_tensor(tensor_D_computed.data().get(), make_layout(shape_mC, stride_mC));
auto mBias = make_tensor(tensor_bias.data().get(), make_layout(shape_mBias));
auto mAlpha = make_tensor(tensor_alpha.data().get(), make_layout(shape_mBias));
auto mBeta = make_tensor(tensor_beta.data().get(), make_layout(shape_mBias));
cutlass::reference::host::ConvEpilogueFusionParams<
ElementAccumulator,
ElementScalar,
ElementCompute,
ElementC,
ElementD,
decltype(mAlpha),
decltype(mBeta),
decltype(mBias),
ActivationFunctor>
epilogue_fusion_params{};
epilogue_fusion_params.alpha = alpha;
epilogue_fusion_params.beta = beta;
if constexpr (IsBiasEnabled) {
epilogue_fusion_params.tensor_bias = mBias;
}
auto padding = cute::reverse(problem_shape.lower_padding);
auto tstride = cute::reverse(problem_shape.traversal_stride);
auto dilation = cute::reverse(problem_shape.dilation);
cutlass::reference::host::ConvReferenceImpl<
ConvOp,
NumSpatialDimensions,
decltype(mA),
decltype(mB),
decltype(mC),
decltype(mD_ref),
decltype(padding),
decltype(tstride),
decltype(dilation),
decltype(epilogue_fusion_params)>
reference_impl(mA, mB, mC, mD_ref, padding, tstride, dilation, epilogue_fusion_params);
//
// Reference check - support caching results
//
CachedTestKey cached_test_key = CreateCachedConvNd3xTestKey<
ProblemShape,
ElementA,
ElementB,
ElementC,
ElementD
>(
ConvOp,
problem_shape,
alpha,
beta,
tensor_A,
tensor_B,
tensor_C
);
//
// Look for the cached key
//
bool cached_result_loaded = false;
CachedTestResult cached_test_result;
std::string convnd_result_cache_name =
std::string("cached_results_") + CUTLASS_TARGET_NAME + ".txt";
#if (CUTLASS_TEST_ENABLE_CACHED_RESULTS)
CachedTestResultListing cached_results(convnd_result_cache_name);
auto cached = cached_results.find(cached_test_key);
cached_result_loaded = cached.first;
if (cached_result_loaded) {
cached_test_result = cached.second;
}
#endif
if (!cached_result_loaded) {
// Compute reference
reference_impl.compute_reference();
#if (CUTLASS_TEST_ENABLE_CACHED_RESULTS)
cached_test_result.D = TensorHash(tensor_D_reference);
CachedTestResultListing cached_results(convnd_result_cache_name);
cached_results.append(cached_test_key, cached_test_result);
cached_results.write(convnd_result_cache_name);
#endif
} // if (!cached_result_loaded)
#if (CUTLASS_TEST_ENABLE_CACHED_RESULTS)
uint32_t tensor_D_computed_hash = TensorHash(tensor_D_computed);
passed = (tensor_D_computed_hash == cached_test_result.D);
// If hash fails, double check against reference implementation.
if(!passed) {
std::cerr << "Hash-based comparison unsuccessful for key:" << "\n" << cached_test_key
<< ", comparing with reference implementation now.\n";
if (cached_result_loaded) {
// Compute reference
reference_impl.compute_reference();
}
// Validate kernel against reference
passed = compare_reference(mD_ref, mD_computed, mA, mB, mAlpha, mBeta, mBias, this->epsilon);
}
#else
// Validate kernel against reference
passed = compare_reference(mD_ref, mD_computed, mA, mB, mAlpha, mBeta, mBias, this->epsilon);
#endif
EXPECT_TRUE(passed);
return passed;
}
template<
class Engine, class Layout,
class EngineA, class LayoutA,
class EngineB, class LayoutB,
class EngineAlpha, class LayoutAlpha,
class EngineBeta, class LayoutBeta,
class EngineBias, class LayoutBias>
static constexpr bool
compare_reference(
cute::Tensor<Engine, Layout> const& reference,
cute::Tensor<Engine, Layout> const& computed,
cute::Tensor<EngineA, LayoutA> const& A,
cute::Tensor<EngineB, LayoutB> const& B,
cute::Tensor<EngineAlpha, LayoutAlpha> const& tensor_alpha,
cute::Tensor<EngineBeta, LayoutBeta> const& tensor_beta,
cute::Tensor<EngineBias, LayoutBias> const& tensor_bias,
float epsilon = 0.0f) {
if (size(reference) != size(computed)) {
return false;
}
bool passed = true;
if (epsilon == 0.0f) {
// fast refcheck w/o epsilon
for (size_t i = 0; i < size_t(size(reference)); ++i) {
if (reference(i) != computed(i)) {
passed = false;
printf("[%llu] %f, %f\n", static_cast<unsigned long long>(i),
float(reference(i)), float(computed(i)));
break;
}
}
} else {
// refcheck with epsilon
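// The check below passes an element if either its absolute or its relative error is within
// epsilon. For example, with epsilon = 1e-3, ref = 1000.0 and act = 1000.5 pass via the
// relative error (~5e-4) even though the absolute error (0.5) exceeds epsilon.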
for (size_t i = 0; i < size_t(size(reference)); ++i) {
auto ref = static_cast<float>(reference(i));
auto act = static_cast<float>(computed(i));
auto abs_error = std::abs(act - ref);
auto rel_error = abs_error / (std::max(std::abs(act), std::abs(ref)) + 0.00001f);
if (std::isnan(abs_error) || std::isnan(rel_error) ||
std::min(abs_error, rel_error) > epsilon) {
passed = false;
printf("[%llu] %f, %f\n", static_cast<unsigned long long>(i),
float(reference(i)), float(computed(i)));
break;
}
}
}
#if CUTLASS_DEBUG_TRACE_LEVEL > 1
if (not passed) {
cute::print("Reference:");
cute::print_tensor(reference);
cute::print("\nComputed:");
cute::print_tensor(computed);
cute::print("\n");
for (size_t i = 0; i < size_t(size(A)); ++i) {
printf("[%llu]: A = %f\n", static_cast<unsigned long long>(i), float(A(i)));
}
for (size_t i = 0; i < size_t(size(B)); ++i) {
printf("[%llu]: B = %f\n", static_cast<unsigned long long>(i), float(B(i)));
}
if constexpr (IsBiasEnabled) {
for (size_t i = 0; i < size_t(size(tensor_bias)); ++i) {
printf("[%llu]: bias = %f\n", static_cast<unsigned long long>(i),
float(tensor_bias(i)));
}
}
for (size_t i = 0; i < size_t(size(reference)); ++i) {
printf("[%llu]: ref = %f, computed = %f\n", static_cast<unsigned long long>(i),
float(reference(i)), float(computed(i)));
}
}
#endif
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Conv, bool SupportStrides = (Conv::DispatchPolicy::ConvOp != cutlass::conv::Operator::kDgrad)>
bool TestAllConv(double alpha = 1.0, double beta = 0.0, float epsilon = 0.0f) {
using ElementScalar = typename Conv::EpilogueOutputOp::ElementScalar;
bool passed = true;
ConvTestbed<Conv> testbed;
testbed.epsilon = epsilon;
auto problem_vector = get_conv_problem_vector<
Conv::NumSpatialDimensions, Conv::DispatchPolicy::ConvOp, SupportStrides>();
using DecompositionMode = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90StreamKParams::DecompositionMode;
using RasterOrderOptions = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90::RasterOrderOptions;
using MaxSwizzleSize = typename gemm::device::detail::MaxSwizzleSize;
using Splits = typename gemm::device::detail::Splits;
std::vector<DecompositionMode> decomposition_modes = {DecompositionMode::Heuristic};
static constexpr bool UsesStreamKScheduler = cute::is_same_v<typename Conv::ConvKernel::TileSchedulerTag, cutlass::gemm::StreamKScheduler>;
if constexpr (UsesStreamKScheduler) {
decomposition_modes.push_back(DecompositionMode::DataParallel);
decomposition_modes.push_back(DecompositionMode::SplitK);
decomposition_modes.push_back(DecompositionMode::StreamK);
}
for (auto conv_problem : problem_vector) {
#if CUTLASS_DEBUG_TRACE_LEVEL > 0
print(conv_problem);
#endif
for (DecompositionMode decomp_mode : decomposition_modes) {
std::vector problem_splits = {Splits{1}};
if (decomp_mode == DecompositionMode::Heuristic || decomp_mode == DecompositionMode::SplitK) {
problem_splits.push_back(Splits{2});
}
for (auto splits : problem_splits) {
passed = testbed.run(
conv_problem,
cutlass::from_real<ElementScalar>(alpha),
cutlass::from_real<ElementScalar>(beta)
,RasterOrderOptions::Heuristic, // raster_order
MaxSwizzleSize(1),
splits,
decomp_mode
);
if (!passed) {
printf("Failed test for "); print(conv_problem);
return false;
}
} // splits
} // decomposition_mode
}
return passed;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace test::conv::device
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/conv/device_3x/testbed_conv.hpp/0 | {
"file_path": "test/unit/conv/device_3x/testbed_conv.hpp",
"repo_id": "test",
"token_count": 9784
} | 45 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Unit tests for the small matrix class.
*/
#include <iostream>
#include "../common/cutlass_unit_test.h"
#include "cutlass/matrix.h"
#include "cutlass/core_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix, elementwise_add) {
using Matrix4x4 = cutlass::Matrix4x4<float>;
Matrix4x4 A = {
1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16
};
Matrix4x4 B = A.transpose();
Matrix4x4 C = A.add(B * 2.125f);
bool passed = true;
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 4; ++j) {
float got = C.at(i, j);
float expected = A.at(i, j) + A.at(j, i) * 2.125f;
if (got != expected) {
passed = false;
}
}
}
EXPECT_TRUE(passed);
if (!passed) {
std::cout << "A:\n" << A << "\n\nB:\n" << B << "\n\nC:\n" << C << std::endl;
}
}
TEST(Matrix, elementwise_multiply) {
using Matrix4x4 = cutlass::Matrix4x4<float>;
Matrix4x4 A = {
1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16
};
Matrix4x4 B = A.transpose();
Matrix4x4 C = A.multiply(B);
bool passed = true;
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 4; ++j) {
float got = C.at(i, j);
float expected = A.at(i, j) * A.at(j, i);
if (got != expected) {
passed = false;
}
}
}
EXPECT_TRUE(passed);
if (!passed) {
std::cout << "A:\n" << A << "\n\nB:\n" << B << "\n\nC:\n" << C << std::endl;
}
}
TEST(Matrix, product_4x4_overloads) {
using Matrix4x4 = cutlass::Matrix4x4<float>;
Matrix4x4 A = {
1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16
};
Matrix4x4 B = {
-1, -2, 0, 4,
1, 2, 1, 1,
3, 2, 1, 1,
1, 0, 8, 2
};
Matrix4x4 C = Matrix4x4::identity();
Matrix4x4 D = A * B + C;
bool passed = true;
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 4; ++j) {
float got = D.at(i, j);
float expected = (i == j ? 1.0f : 0.0f);
for (int k = 0; k < 4; ++k) {
expected += A.at(i, k) * B.at(k, j);
}
if (got != expected) {
passed = false;
}
}
}
EXPECT_TRUE(passed);
if (!passed) {
std::cout << "A:\n" << A << "\n\nB:\n" << B << "\n\nC:\n" << C << "\n\nD:\n" << D << std::endl;
}
}
TEST(Matrix, product_4x4) {
using Matrix4x4 = cutlass::Matrix4x4<float>;
Matrix4x4 A = {
1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16
};
Matrix4x4 B = {
-1, -2, 0, 4,
1, 2, 1, 1,
3, 2, 1, 1,
1, 0, 8, 2
};
Matrix4x4 C = Matrix4x4::identity();
// Compute product with optional source accumulator
Matrix4x4 D = A.product(B, C);
bool passed = true;
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 4; ++j) {
float got = D.at(i, j);
float expected = (i == j ? 1.0f : 0.0f);
for (int k = 0; k < 4; ++k) {
expected += A.at(i, k) * B.at(k, j);
}
if (got != expected) {
passed = false;
}
}
}
EXPECT_TRUE(passed);
if (!passed) {
std::cout << "A:\n" << A << "\n\nB:\n" << B << "\n\nC:\n" << C << "\n\nD:\n" << D << std::endl;
}
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 4; ++j) {
float c = (i == j ? 1.0f : 0.0f);
EXPECT_TRUE(A.row(i).dot(B.column(j)) + c == D.at(i, j));
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/core/matrix.cu/0 | {
"file_path": "test/unit/core/matrix.cu",
"repo_id": "test",
"token_count": 2184
} | 46 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include <cute/atom/copy_traits_sm75.hpp>
using namespace cute;
template <class T>
__global__ void
ldsm_test_device(uint16_t* g_in, uint16_t* g_out)
{
constexpr int count = sizeof(T) / 4;
int tid = threadIdx.x;
int stride = blockDim.x;
// load input gmem -> smem
__shared__ uint32_t smem[32 * count];
for (int i = 0; i < count; ++i) {
smem[tid + (stride * i)] = reinterpret_cast<uint32_t*>(g_in)[tid + (stride * i)];
}
__syncthreads();
uint32_t reg[count];
for (int i = 0; i < count; ++i) {
reg[i] = 0;
}
// load smem -> rmem using LDSM
uint128_t* smem_ptr = reinterpret_cast<uint128_t*>(smem) + tid;
T* rmem_ptr = reinterpret_cast<T*>(reg);
cute::copy_ldsm(smem_ptr, rmem_ptr);
// store output rmem -> gmem
for (int i = 0; i < count; ++i) {
reinterpret_cast<uint32_t*>(g_out)[tid + (stride * i)] = reg[i];
}
}
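// Note (illustrative): each thread contributes one 128-bit shared-memory address and
// cute::copy_ldsm lowers to the ldmatrix instruction, which distributes the loaded 8x8
// tiles of 16-bit elements across the warp's registers. T = uint32_t/uint64_t/uint128_t
// selects the x1/x2/x4 variant via `count` above.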
template <class TiledCopy, class SmemLayout>
__global__ void
ldsm_test_device_cute(uint16_t* g_in, uint16_t* g_out,
TiledCopy tiled_copy, SmemLayout smem_layout)
{
using namespace cute;
__shared__ uint16_t smem[size(smem_layout)];
auto t_g_in = make_tensor(make_gmem_ptr(g_in), smem_layout);
auto t_g_out = make_tensor(make_gmem_ptr(g_out), smem_layout);
auto t_smem = make_tensor(make_smem_ptr(smem), smem_layout);
int tid = threadIdx.x;
// Load input gmem -> smem
for (int i = tid; i < size(t_smem); i += size(tiled_copy)) {
t_smem(i) = t_g_in(i);
}
__syncthreads();
auto thr_copy = tiled_copy.get_thread_slice(tid);
auto tXsX = thr_copy.partition_S(t_smem); // (V,M,N)
auto tXgX = thr_copy.partition_D(t_g_out); // (V,M,N)
auto tXrX = make_tensor<uint16_t>(shape(tXgX)); // (V,M,N)
clear(tXrX); // Just to make sure
/*
if (thread0()) {
print("tXsX: " ); print(tXsX.layout()); print("\n");
print("tXgX: " ); print(tXgX.layout()); print("\n");
print("tXrX: " ); print(tXrX.layout()); print("\n");
}
*/
// Copy smem -> rmem via tiled_copy (LDSM, LDS)
copy(tiled_copy, tXsX, tXrX);
// Output rmem -> gmem
copy(tXrX, tXgX);
}
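// Note (illustrative): make_tiled_copy(atom, ThrLayout, ValLayout) in the tests below builds
// a tiled copy in which ThrLayout arranges the participating threads and ValLayout assigns
// the values each thread moves; size(tiled_copy) is the thread count used as the launch bound.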
TEST(SM80_CuTe_Ampere, Ldsm)
{
constexpr int count = 1024;
thrust::host_vector<uint16_t> h_in(count);
for (int i = 0; i < count; ++i) {
h_in[i] = uint16_t(i);
}
thrust::device_vector<uint16_t> d_in = h_in;
//
// LDSM 1x (32b)
//
{
thrust::device_vector<uint16_t> d_out(count);
ldsm_test_device<uint32_t><<<1, 32>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()));
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < 32; ++i) {
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("LDSM 1x ldsm_test_device SUCCESS\n");
}
//
// LDSM 2x (64b)
//
{
thrust::device_vector<uint16_t> d_out(count);
ldsm_test_device<uint64_t><<<1, 32>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()));
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < 64; ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("LDSM 2x ldsm_test_device SUCCESS\n");
}
//
// LDSM 4x (128b)
//
{
thrust::device_vector<uint16_t> d_out(count);
ldsm_test_device<uint128_t><<<1, 32>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()));
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < 128; ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("LDSM 4x ldsm_test_device SUCCESS\n");
}
//
// CuTe LDSM
//
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x1_LDSM_N, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved U32x1_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x2_LDSM_N, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved U32x2_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x4_LDSM_N, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved U32x4_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<UniversalCopy<uint16_t>, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved LDS.U16 SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x1_LDSM_N, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U32x1_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x2_LDSM_N, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U32x2_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x4_LDSM_N, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U32x4_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<UniversalCopy<uint16_t>, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 LDS.U16 SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride<_32, _1>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U16x2_LDSM_T, uint16_t>{},
Layout<Shape<_4,_8>>{},
Layout<Shape<_2,_1>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U16x2_LDSM_T SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride<_32, _1>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U16x4_LDSM_T, uint16_t>{},
Layout<Shape<_4,_8>>{},
Layout<Shape<_4,_1>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U16x4_LDSM_T SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride<_32, _1>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U16x8_LDSM_T, uint16_t>{},
Layout<Shape<_4,_8>>{},
Layout<Shape<_8,_1>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U16x8_LDSM_T SUCCESS\n");
}
CUTLASS_TRACE_HOST("PASS");
}
| test/unit/cute/ampere/ldsm.cu/0 | {
"file_path": "test/unit/cute/ampere/ldsm.cu",
"repo_id": "test",
"token_count": 7113
} | 47 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cutlass/trace.h>
#include <iostream>
#include <cute/tensor.hpp>
using namespace cute;
template <class Layout>
void
test_left_inverse(Layout const& layout)
{
auto inv_layout = left_inverse(layout);
CUTLASS_TRACE_HOST(layout << " ^ -1\n" << " => \n" << inv_layout);
for (int i = 0; i < size(layout); ++i) {
//printf("%3d: %3d %3d\n", i, int(layout(i)), int(inv_layout(layout(i))));
EXPECT_EQ(inv_layout(layout(i)), i);
}
CUTLASS_TRACE_HOST("Composition: " << coalesce(composition(inv_layout, layout)));
}
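// Example: Layout (4):(2) maps 0,1,2,3 -> 0,2,4,6; its left inverse maps 0,2,4,6 back to
// 0,1,2,3, so inv_layout(layout(i)) == i for every i in [0, size(layout)), which is exactly
// the property checked above.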
TEST(CuTe_core, Inverse_left)
{
{
auto layout = Layout<Shape <_1>,
Stride<_0>>{};
test_left_inverse(layout);
}
{
auto layout = Layout<Shape <Shape <_1,_1>>,
Stride<Stride<_0,_0>>>{};
test_left_inverse(layout);
}
{
auto layout = Layout<Shape <_1>,
Stride<_1>>{};
test_left_inverse(layout);
}
{
auto layout = Layout<Shape <_4>,
Stride<_1>>{};
test_left_inverse(layout);
}
{
auto layout = Layout<Shape <_4>,
Stride<_2>>{};
test_left_inverse(layout);
}
{
auto layout = Layout<Shape <_8, _4>>{};
test_left_inverse(layout);
}
{
auto layout = Layout<Shape <_8, _4>,
Stride<_4, _1>>{};
test_left_inverse(filter(layout));
}
{
auto layout = Layout<Shape< _2,_4,_6>>{};
test_left_inverse(layout);
}
{
auto layout = Layout<Shape <_2,_4,_6>,
Stride<_4,_1,_8>>{};
test_left_inverse(layout);
}
{
auto layout = Layout<Shape <_4, _2>,
Stride<_1,_16>>{};
test_left_inverse(layout);
}
//
// Swizzle left_inverse
//
{
auto layout = ComposedLayout<Swizzle<1,0,2>, _0, Layout<Shape <_4, _4>,
Stride<_1, _4>>>{};
test_left_inverse(layout);
}
{
auto layout = ComposedLayout<Swizzle<1,0,2>, _0, Layout<Shape <_4, _4>,
Stride<_4, _1>>>{};
test_left_inverse(layout);
}
{
auto layout = ComposedLayout<Swizzle<1,0,1>, _0, Layout<Shape <_4, _4>,
Stride<_8, _1>>>{};
test_left_inverse(layout);
}
//
// Negative strides (beta support)
// Post-conditions/layout indexing aren't generalized enough to support these yet
// However, the composition post-condition is general enough.
{
auto layout = make_layout(Shape<_4>{}, Stride<Int<-1>>{});
test_left_inverse(layout);
}
//{
//auto layout = Layout<Shape < _2,_4>,
// Stride<_m1,_2>>{};
//test_left_inverse(layout);
//}
//{
//auto layout = Layout<Shape < _2, _4>,
// Stride< _4,_m1>>{};
//test_left_inverse(layout);
//}
//{
//auto layout = Layout<Shape < _2, _4, _6>,
// Stride<_m1,_12,_m2>>{};
//test_left_inverse(layout);
//}
}
| test/unit/cute/core/inverse_left.cpp/0 | {
"file_path": "test/unit/cute/core/inverse_left.cpp",
"repo_id": "test",
"token_count": 1980
} | 48 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cute/tensor.hpp>
#include "../cooperative_gemm_common.hpp"
using namespace cute;
#define USE_FP8 1
#if USE_FP8
TEST(SM90_CuTe_Hopper, CooperativeGemmTilingF8) {
using TA = uint8_t;
using TB = uint8_t;
using TC = uint32_t;
constexpr uint32_t thread_block_size = 128;
constexpr int MaxVecBits = 16;
using tiled_mma_t =
TiledMMA<
MMA_Atom<SM80_16x8x32_S32S8S8S32_TN>,
Layout<Shape<_2, _2, _1>, Stride<_1, _2, _0>>,
Tile<_32, _32, _32>
>;
using swizzle = Swizzle<2, 4, 3>;
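// Note (illustrative): cute::Swizzle<B, M, S> XORs B bits taken S positions above an M-bit
// base into that base, so Swizzle<2, 4, 3> permutes 16-element groups within the 64x64 tile
// to spread accesses across shared-memory banks.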
// This is for A row major, B col major according to CUTLASS default configs
using ALayout = decltype(composition(swizzle{}, Layout<Shape<_64, _64>, Stride<_64, _1>>{}));
using BLayout = decltype(composition(swizzle{}, Layout<Shape<_64, _64>, Stride<_1, _64>>{}));
using CLayout = decltype(make_layout(Shape<_64, _64>{}, LayoutLeft{}));
test_cooperative_gemm<ALayout,
BLayout,
CLayout,
ALayout,
BLayout,
CLayout,
AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, // A
AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, // B
AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, // C
thread_block_size,
tiled_mma_t,
MaxVecBits,
TA,
TB,
TC>();
}
#else
TEST(SM90_CuTe_Hopper, CooperativeGemmTilingF16) {
using TA = half_t;
using TB = half_t;
using TC = half_t;
constexpr uint32_t thread_block_size = 64;
constexpr int MaxVecBits = 16;
using tiled_mma_t =
TiledMMA<
MMA_Atom<SM80_16x8x16_F16F16F16F16_TN>,
Layout<Shape<_2, _1, _1>, Stride<_1, _0, _0>>,
Tile<_32, _32, _32>
>;
using swizzle = Swizzle<3, 3, 3>;
// This is for A row major, B col major according to CUTLASS default configs
using ALayout = decltype(composition(swizzle{},
Layout<Shape<_64, _64>, Stride<_64, _1>>{}));
using BLayout = decltype(composition(swizzle{},
Layout<Shape<_64, _64>, Stride<_1, _64>>{}));
using CLayout = decltype(make_layout(Shape<_64, _64>{}, LayoutLeft{}));
test_cooperative_gemm<ALayout,
BLayout,
CLayout,
ALayout,
BLayout,
CLayout,
AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, // A
AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, // B
AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, // C
thread_block_size,
tiled_mma_t,
MaxVecBits,
TA,
TB,
TC>();
}
#endif
| test/unit/cute/hopper/cooperative_gemm.cu/0 | {
"file_path": "test/unit/cute/hopper/cooperative_gemm.cu",
"repo_id": "test",
"token_count": 2132
} | 49 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <iomanip>
#include <utility>
#include <type_traits>
#include <vector>
#include <numeric>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
using namespace cute;
template <class GmemTensor, class RmemTiler, class CopyPolicy>
__global__
void
kernel(GmemTensor gC, RmemTiler tiler, CopyPolicy policy)
{
Tensor tCgC = local_tile(gC, tiler, 0);
Tensor rC = make_tensor_like(tCgC);
using T = typename GmemTensor::value_type;
for (int i = 0; i < size(rC); ++i) {
rC(i) = T(i % 13);
}
#if 0
print(" gC : "); print( gC); print("\n");
print("tCgC : "); print(tCgC); print("\n");
print(" rC : "); print( rC); print("\n");
#endif
// NOTE: only 1 thread; this thread produces a block of 8x8 output. The fringe will not be touched.
//copy(rC, tCgC); // Enable auto-vectorization if static
//copy_vec<T>(rC, tCgC); // Disable auto-vectorization always
copy(policy, rC, tCgC); // Use a policy to establish vectorization assumptions
}
template <class T, class CopyPolicy, class GmemLayout, class RmemTiler>
void
test_copy_vectorization(CopyPolicy policy, GmemLayout gmem_layout, RmemTiler rmem_tiler)
{
thrust::host_vector<T> h_in(cosize(gmem_layout), T(0));
thrust::device_vector<T> d_in = h_in;
Tensor m_in = make_tensor(make_gmem_ptr(raw_pointer_cast(d_in.data())), gmem_layout);
kernel<<<1,1>>>(m_in, rmem_tiler, policy);
thrust::host_vector<T> h_out = d_in;
Tensor result = make_tensor(h_out.data(), gmem_layout);
thrust::host_vector<T> h_true = h_in;
Tensor ref = make_tensor(h_true.data(), gmem_layout);
// Set the values directly in the reference tensor, no copy
Tensor ref_tile = local_tile(ref, rmem_tiler, 0);
for (int i = 0; i < size(ref_tile); ++i) {
ref_tile(i) = T(i % 13);
}
// Compare the reference and the result. Print only the first 3 errors.
// print_tensor(result);
int count = 3;
for (int i = 0; i < size(ref) && count > 0; ++i) {
EXPECT_EQ(result(i), ref(i));
if (result(i) != ref(i)) {
--count;
}
}
}
template <class T, class GmemLayout, class RmemTiler>
void
test_copy_vectorization(GmemLayout gmem_layout, RmemTiler rmem_tiler)
{
test_copy_vectorization<T>(DefaultCopy{}, gmem_layout, rmem_tiler);
}
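// Note (illustrative): with AutoVectorizingCopyWithAssumedAlignment<128> the 8x8 float tile
// can be recast into 128-bit accesses even for dynamic shapes, whereas DefaultCopy only
// vectorizes when the layout is fully static, since a dynamic shape carries no compile-time
// alignment guarantee -- matching the comments in the test below.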
TEST(SM70_CuTe_Volta, SimpleVec)
{
// Fully static layouts are assumed to be aligned -- these will be vectorized
test_copy_vectorization<float>(make_layout(make_shape(Int<8>{}, Int<8>{})), Shape<_8,_8>{});
test_copy_vectorization<float>(make_layout(make_shape(Int<12>{}, Int<12>{})), Shape<_8,_8>{});
// Fails in vectorization recast due to misalignment and static assertions
//test_copy_vectorization<float>(make_layout(make_shape(Int<9>{}, Int<9>{})), Shape<_8,_8>{});
// Dynamic layouts are not assumed to be aligned -- these will not be vectorized
test_copy_vectorization<float>(make_layout(make_shape(12,12)), Shape<_8,_8>{});
test_copy_vectorization<float>(make_layout(make_shape( 9, 9)), Shape<_8,_8>{});
// Dynamic layouts that are assumed to be aligned -- these will be vectorized
test_copy_vectorization<float>(AutoVectorizingCopyWithAssumedAlignment<128>{}, make_layout(make_shape( 8, 8)), Shape<_8,_8>{});
test_copy_vectorization<float>(AutoVectorizingCopyWithAssumedAlignment<128>{}, make_layout(make_shape(12,12)), Shape<_8,_8>{});
// Fails -- bad alignment assumption
//test_copy_vectorization<float>(AutoVectorizingCopyWithAssumedAlignment<128>{}, make_layout(make_shape( 9, 9)), Shape<_8,_8>{});
}
| test/unit/cute/volta/vectorization_auto.cu/0 | {
"file_path": "test/unit/cute/volta/vectorization_auto.cu",
"repo_id": "test",
"token_count": 1836
} | 50 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include <algorithm>
#include <random>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/packed_stride.hpp"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gett.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/fusion/operations.hpp"
#include "cutlass/complex.h"
#include "testbed_utils.h"
#include "cutlass/kernel_hardware_info.hpp"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/gemm/gemm.h"
#include "cute/int_tuple.hpp"
#include "cute/layout.hpp"
#include "cute/numeric/int.hpp"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
enum class ScalarLoc {
ON_HOST = 0,
ON_DEVICE = 1
};
enum class VectorBeta {
DISABLED = 0,
ENABLED = 1
};
enum class CheckEquality {
EXACT = 0,
RELATIVE = 1
};
namespace detail{
// Helper classes that take default data type when
// the Gemm::EpilogueOutputOp does not have ElementCompute
// and ElementScalar.
// (e.g. when Sm90TreeVisitor is used as FusionCallbacks)
template <typename Gemm, typename Default, typename = void>
struct ElementComputeType {
using Type = Default;
};
template <typename Gemm, typename Default>
struct ElementComputeType<Gemm, Default, std::void_t<typename Gemm::EpilogueOutputOp::ElementCompute>> {
using Type = typename Gemm::EpilogueOutputOp::ElementCompute;
};
template <typename Gemm, typename Default, typename = void>
struct ElementScalarType {
using Type = Default;
};
template <typename Gemm, typename Default>
struct ElementScalarType<Gemm, Default, std::void_t<typename Gemm::EpilogueOutputOp::ElementScalar>> {
using Type = typename Gemm::EpilogueOutputOp::ElementScalar;
};
// The maximum swizzle size to use
//
// This class, like Splits below, makes it harder to confuse
// the order of arguments of the various run(...) functions in this file.
class MaxSwizzleSize {
public:
MaxSwizzleSize() = default;
template<class IntegralNotBool,
__CUTE_REQUIRES((std::is_integral_v<IntegralNotBool> &&
!cute::is_same_v<IntegralNotBool, bool>)) >
explicit MaxSwizzleSize(IntegralNotBool max_swizzle_size) : max_swizzle_size_(max_swizzle_size) {}
explicit operator int() const { return max_swizzle_size_; }
private:
int max_swizzle_size_ = 1;
};
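// Example usage (illustrative): the explicit constructor and explicit operator int() force
// call sites to spell out the intent, e.g.
//   auto sw = MaxSwizzleSize(4); int raw = static_cast<int>(sw);
// while `MaxSwizzleSize sw = 4;` or an implicit conversion back to int would not compile.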
template <typename T>
auto make_iterator(T* ptr) {
using namespace cute;
if constexpr (cute::is_subbyte_v<T>) {
return subbyte_iterator<T>(ptr);
}
else {
return ptr;
}
}
template<class T>
struct IsDefaultEpilogue {
static constexpr bool value = false;
};
template<class ...args>
struct IsDefaultEpilogue<cutlass::epilogue::collective::DefaultEpilogue<args...>> {
static constexpr bool value = true;
};
template<class ...args>
struct IsDefaultEpilogue<cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter<args...>> {
static constexpr bool value = true;
};
// The number of splits to test.
//
// This class makes it harder to confuse the order of arguments
// of the various run(...) functions in this file. The constructor
// is explicit, so one can't just type 42 (or false, which the
// compiler unhelpfully turns into 0); one has to type Splits(42).
// Splits() picks the default number of splits, 1.
//
// The conversion-to-int operator (operator int()) MUST be explicit!
// Conversion to int MUST require static_cast<int>.
// Otherwise, that defeats a key purpose of this class,
// which is to catch common errors of confusing the order
// of function arguments.
class Splits {
public:
Splits() = default;
template<class IntegralNotBool,
__CUTE_REQUIRES((std::is_integral_v<IntegralNotBool> &&
!cute::is_same_v<IntegralNotBool, bool>)) >
explicit Splits(IntegralNotBool splits) : splits_(splits) {}
explicit operator int() const { return splits_; }
private:
int splits_ = 1;
};
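// Example (illustrative): a call such as
//   run(problem, alpha, beta, raster_order, MaxSwizzleSize(1), Splits(2), decomp_mode);
// cannot silently swap the swizzle and split counts, which raw ints (or a stray bool) could.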
// The number of iterations to test.
//
// This class, like Splits above, makes it harder to confuse
// the order of arguments of the various run(...) functions in this file.
// Iterations() picks the default number of iterations, 20.
class Iterations {
public:
Iterations() = default;
template<class IntegralNotBool,
__CUTE_REQUIRES((std::is_integral_v<IntegralNotBool> &&
!cute::is_same_v<IntegralNotBool, bool>)) >
explicit Iterations(IntegralNotBool iterations) : iterations_(iterations) {}
explicit operator int() const { return iterations_; }
private:
int iterations_ = 20;
};
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
}
else if (bits_input <= 8) {
scope_max = 1;
scope_min = -1;
}
else{
scope_max = 4;
scope_min = -4;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else if (dist_kind == cutlass::Distribution::AllOnes) {
cutlass::reference::host::TensorFill(view, Element(1));
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
// Looks at Cute Stride to check Row / Column Major
template<typename Stride>
static constexpr bool is_row_or_col_major(){
int stride_0 = int(cute::size<0>(Stride{}));
int stride_1 = int(cute::size<1>(Stride{}));
int depth = cute::depth(Stride{});
return ((stride_0 == 1) || (stride_1 == 1)) && (depth == 1);
}
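// For example (illustrative): a packed row-major stride such as
// cute::Stride<int64_t, cute::Int<1>, int64_t> or a column-major
// cute::Stride<cute::Int<1>, int64_t, int64_t> satisfies this check, while a nested
// (blocked) stride fails because its depth is greater than 1.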
//
// Default MMA input Operands : A , B
//
template<
class ScheduleType_,
class Gemm,
class ElementA_ = typename Gemm::GemmKernel::ElementA,
class ElementB_ = typename Gemm::GemmKernel::ElementB>
struct HostCollectiveMainloop {
// Kernel data types
using ElementA = ElementA_;
using StrideA = typename Gemm::GemmKernel::StrideA;
using ElementB = ElementB_;
using StrideB = typename Gemm::GemmKernel::StrideB;
using ScheduleType = typename Gemm::GemmKernel::CollectiveMainloop::DispatchPolicy::Schedule;
using LayoutTagA = cutlass::detail::StrideToLayoutTagA_t<StrideA>;
using LayoutTagB = cutlass::detail::StrideToLayoutTagB_t<StrideB>;
using ElementAccumulator = typename Gemm::GemmKernel::ElementAccumulator;
using ElementScalingFactor = ElementAccumulator;
using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;
using EpilogueOutputOp = typename Gemm::EpilogueOutputOp;
using Arguments = typename Gemm::GemmKernel::MainloopArguments;
cutlass::ComplexTransform TransformA = Gemm::kTransformA;
cutlass::ComplexTransform TransformB = Gemm::kTransformB;
StrideA stride_a;
StrideB stride_b;
typename LayoutTagA::Stride stride_factor_A;
typename LayoutTagB::Stride stride_factor_B;
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::HostTensor<ElementA, LayoutTagA> tensor_A;
cutlass::HostTensor<ElementB, LayoutTagB> tensor_B;
// Whether to use relative equality checks
CheckEquality check_relative_equality = CheckEquality::EXACT;
uint64_t seed;
static constexpr uint64_t kDefaultSeed = 4096;
// Note: this limitation comes from testbed / not the library
static_assert(is_row_or_col_major<StrideA>(),
"ERROR : A Layout is neither Row nor Column Major");
static_assert(is_row_or_col_major<StrideB>(),
"ERROR : B Layout is neither Row nor Column Major");
HostCollectiveMainloop(
CheckEquality check_relative_equality_ = CheckEquality::EXACT,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
uint64_t seed_ = kDefaultSeed,
typename LayoutTagA::Stride stride_factor_A_ = typename LayoutTagA::Stride(),
typename LayoutTagB::Stride stride_factor_B_ = typename LayoutTagB::Stride()
):
stride_factor_A(stride_factor_A_),
stride_factor_B(stride_factor_B_),
init_A(init_A_), init_B(init_B_), seed(seed_),
check_relative_equality(check_relative_equality_) { }
template<class ProblemShapeType>
bool initialize(ProblemShapeType problem_size) {
//
// Allocate the GEMM workspace
//
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
auto M = cute::size<0>(problem_shape_MNKL);
auto N = cute::size<1>(problem_shape_MNKL);
auto K = cute::size<2>(problem_shape_MNKL);
auto L = cute::size<3>(problem_shape_MNKL);
stride_a = cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(M, K, L));
stride_b = cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(N, K, L));
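// Illustrative example: for a row-major A (StrideA == cute::Stride<int64_t, cute::Int<1>, int64_t>),
// make_cute_packed_stride returns (K, _1, M*K): consecutive elements along K are contiguous and
// each batch is M*K elements apart.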
// 2.x host tensor does not natively contain a batch stride or coord, so we spoof it by folding it into the outer mode
auto a_coord = cutlass::make_Coord(M * L, K);
// Cutlass has Row/Col major refers to MxK times KxN matrix product,
// so the HostTensorB should be treated as KxN in "coord"'s view
auto b_coord = cutlass::make_Coord(K, N * L);
tensor_A.resize(a_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagA>::layout_factory(a_coord, stride_factor_A));
tensor_B.resize(b_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagB>::layout_factory(b_coord, stride_factor_B));
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2022));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2021));
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
tensor_A.host_view().at({0, 0}) = ElementA(1);
tensor_B.host_view().at({0, 0}) = ElementB(1);
tensor_A.sync_device();
tensor_B.sync_device();
return true;
}
Arguments to_args() {
Arguments arguments =
{
tensor_A.device_data(), stride_a, tensor_B.device_data(), stride_b
};
return arguments;
}
auto to_host_args(ProblemShapeType problem_size) {
using namespace cute;
//
// Allocate the GEMM workspace
//
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
auto M = cute::size<0>(problem_shape_MNKL);
auto N = cute::size<1>(problem_shape_MNKL);
auto K = cute::size<2>(problem_shape_MNKL);
auto L = cute::size<3>(problem_shape_MNKL);
auto A = make_tensor(make_iterator(tensor_A.host_data()),
make_layout(make_shape(M, K, L), stride_a));
auto B = make_tensor(make_iterator(tensor_B.host_data()),
make_layout(make_shape(N, K, L), stride_b));
cutlass::reference::host::GettMainloopParams<ElementAccumulator,
decltype(A),
decltype(B)
> mainloop_params{};
mainloop_params.A = A;
mainloop_params.B = B;
mainloop_params.transform_A = TransformA;
mainloop_params.transform_B = TransformB;
return mainloop_params;
}
void print_tensors(std::ofstream& file) {
file << "A =\n" << tensor_A.host_view()
<< "\nB =\n" << tensor_B.host_view();
}
template <
class Element,
class Layout
>
bool equality_check(
cutlass::TensorView<Element, Layout> const& lhs,
cutlass::TensorView<Element, Layout> const& rhs) const {
// Factors used for calculating relative equality. CUTLASS's relative-equality
// checks in include/cutlass/relatively_equal.h are inspired by
// https://floating-point-gui.de/errors/comparison/. This reference suggests using
// the minimum normal value of a given type as the nonzero_floor.
Element epsilon(static_cast<Element>(0.1f));
Element nonzero_floor(std::numeric_limits<Element>::min());
if constexpr (!cutlass::is_complex<Element>::value) {
if (check_relative_equality == CheckEquality::RELATIVE) {
return cutlass::reference::host::TensorRelativelyEquals(
lhs, rhs, epsilon, nonzero_floor);
}
else {
return cutlass::reference::host::TensorEquals(lhs, rhs);
}
}
else {
return cutlass::reference::host::TensorEquals(lhs, rhs);
}
}
bool compare_reference(
cute::Shape<int,int,int,int> problem_shape_MNKL) {
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
bool passed = true;
return passed;
}
};
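// Host-side epilogue helper for kernels that use the default (non-fusion) collective
// epilogue: it owns the C/D host tensors and produces both the device epilogue
// arguments and the host reference parameters for the plain alpha * acc + beta * C update.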
template<class Gemm>
struct HostCollectiveDefaultEpilogue {
// Fusion types are potentially void if the fusion is not supported;
// this helper avoids constructing a HostTensor with a void element type.
template <typename T, typename U = uint8_t>
using non_void_t = cute::conditional_t<cute::is_void_v<T>, U, T>;
using ScheduleType = typename Gemm::GemmKernel::CollectiveMainloop::DispatchPolicy::Schedule;
using kernel = typename Gemm::GemmKernel;
using Epilogue = typename kernel::CollectiveEpilogue;
using ElementD = typename kernel::ElementD;
using StrideD = typename kernel::StrideD;
using ElementC = non_void_t<typename kernel::ElementC, ElementD>;
using StrideC = typename kernel::StrideC;
using FusionOp = typename Gemm::EpilogueOutputOp;
static_assert(rank(StrideC{}) == 3, "StrideCD must be rank-3: [M, N, L]");
static_assert(rank(StrideD{}) == 3, "StrideCD must be rank-3: [M, N, L]");
static_assert(is_row_or_col_major<StrideC>(),
"ERROR : C Layout is neither Row nor Column Major");
static_assert(is_row_or_col_major<StrideD>(),
"ERROR : D Layout is neither Row nor Column Major");
// Deduce Cutlass Layouts (RowMajor & ColumnMajor)
using LayoutTagC = cutlass::detail::StrideToLayoutTagC_t<StrideC>;
using LayoutTagD = cutlass::detail::StrideToLayoutTagC_t<StrideD>;
using LayoutTagScalar = cutlass::layout::PackedVectorLayout; // scalars are size-1 vectors
using LayoutTagVector = cutlass::layout::PackedVectorLayout;
using ElementAccumulator = typename kernel::ElementAccumulator;
using ElementScalingFactor = ElementAccumulator;
using ProblemShapeType = typename kernel::ProblemShape;
using ElementCompute = typename ElementComputeType<Gemm, ElementAccumulator>::Type;
using ElementScalar = typename ElementScalarType<Gemm, ElementCompute>::Type;
using Arguments = typename Gemm::GemmKernel::EpilogueArguments;
/// Initialization
StrideC stride_c;
StrideD stride_d;
typename LayoutTagC::Stride stride_factor_C;
typename LayoutTagD::Stride stride_factor_D;
cutlass::HostTensor<ElementC, LayoutTagC> tensor_C;
// Inputs
ElementScalar alpha;
ElementScalar beta;
cutlass::HostTensor<ElementD, LayoutTagD> tensor_D;
cutlass::HostTensor<ElementD, LayoutTagD> reference_D;
// Whether to use relative equality checks
CheckEquality check_relative_equality = CheckEquality::EXACT;
// Whether scalars are copied to device memory before kernel launch
ScalarLoc use_device_scalars = ScalarLoc::ON_HOST;
// If per-row scale is enabled and this is true, beta is passed as a host scalar instead of device vector
VectorBeta disable_vector_beta = VectorBeta::DISABLED;
cutlass::Distribution::Kind init_C;
uint64_t seed;
static constexpr uint64_t kDefaultSeed = 4096;
HostCollectiveDefaultEpilogue(
CheckEquality check_relative_equality_ = CheckEquality::EXACT,
ScalarLoc use_device_scalars_ = ScalarLoc::ON_HOST,
VectorBeta disable_vector_beta_ = VectorBeta::DISABLED,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_scale_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_bias_ = cutlass::Distribution::Uniform,
uint64_t seed_ = kDefaultSeed
): init_C(init_C_), seed(seed_),
stride_factor_C(typename LayoutTagC::Stride()),
stride_factor_D(typename LayoutTagD::Stride()),
check_relative_equality(check_relative_equality_),
use_device_scalars(use_device_scalars_){ }
bool initialize(ProblemShapeType problem_size, ElementScalar alpha_=1.f, ElementScalar beta_=0.f) {
// Initialize Epilogue tensors
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
auto [M, N, K, L] = problem_shape_MNKL;
stride_c = cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(M, N, L));
stride_d = cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(M, N, L));
// 2.x host tensor does not natively contain a batch stride or coord, so we spoof it by folding it into the outer mode
auto c_coord = cutlass::make_Coord(M * L, N);
tensor_C.resize(c_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagC>::layout_factory(c_coord, stride_factor_C));
tensor_D.resize(c_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagD>::layout_factory(c_coord, stride_factor_D));
reference_D.resize(c_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagD>::layout_factory(c_coord, stride_factor_D), false);
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2020));
tensor_C.host_view().at({0, 0}) = ElementC(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_C.sync_device();
tensor_D.sync_device();
alpha = alpha_;
beta = beta_;
return true;
}
template <
class Element,
class Layout
>
bool equality_check(
cutlass::TensorView<Element, Layout> const& lhs,
cutlass::TensorView<Element, Layout> const& rhs) const {
// Factors used for calculating relative equality. CUTLASS's relative-equality
// checks in include/cutlass/relatively_equal.h are inspired by
// https://floating-point-gui.de/errors/comparison/. This reference suggests using
// the minimum normal value of a given type as the nonzero_floor.
Element epsilon(static_cast<Element>(0.1f));
Element nonzero_floor(std::numeric_limits<Element>::min());
if constexpr (!cutlass::is_complex<Element>::value) {
if (check_relative_equality == CheckEquality::RELATIVE) {
return cutlass::reference::host::TensorRelativelyEquals(
lhs, rhs, epsilon, nonzero_floor);
}
else {
return cutlass::reference::host::TensorEquals(lhs, rhs);
}
}
else {
return cutlass::reference::host::TensorEquals(lhs, rhs);
}
}
bool compare_reference(
cute::Shape<int,int,int,int> problem_shape_MNKL,
ElementScalar alpha,
ElementScalar beta) {
auto [M, N, K, L] = problem_shape_MNKL;
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
if (tensor_D.size() > 1) {
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
}
if (reference_D.size() > 1) {
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
}
bool passed = equality_check(reference_D.host_view(), tensor_D.host_view());
if (!passed) {
std::cout << "D is incorrect" << std::endl;
}
return passed;
}
void print_tensors(std::ofstream& file) {
file
<< "\nC =\n" << tensor_C.host_view()
<< "\n\nReference =\n" << reference_D.host_view()
<< "\n\nComputed =\n" << tensor_D.host_view();
}
Arguments to_args(ProblemShapeType problem_size) {
Arguments arguments =
{
{alpha, beta},
tensor_C.device_data(), stride_c, tensor_D.device_data(), stride_d
};
return arguments;
}
auto to_host_args(ProblemShapeType problem_size) {
using namespace cute;
//
// Allocate the GEMM workspace
//
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
auto M = cute::get<0>(problem_shape_MNKL);
auto N = cute::get<1>(problem_shape_MNKL);
auto K = cute::get<2>(problem_shape_MNKL);
auto L = cute::get<3>(problem_shape_MNKL);
auto coord_0 = cutlass::make_Coord(0);
auto C = cute::make_tensor(detail::make_iterator(tensor_C.host_data()),
cute::make_layout(cute::make_shape(M, N, L), stride_c));
auto D = cute::make_tensor(detail::make_iterator(reference_D.host_data()),
cute::make_layout(cute::make_shape(M, N, L), stride_d));
cutlass::reference::host::GettEpilogueParams<
ElementScalar,
ElementScalar,
ElementAccumulator,
ElementCompute,
decltype(C),
decltype(D)>
epilogue_params{};
epilogue_params.C = C;
epilogue_params.D = D;
epilogue_params.alpha = alpha;
epilogue_params.beta = beta;
return epilogue_params;
}
};
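// Host-side epilogue helper for kernels built with a fusion-capable (EVT) collective
// epilogue. Beyond C/D it manages the optional fusion operands (per-row alpha/beta
// vectors, scale factors, bias, an auxiliary tensor, and absolute-max outputs) gated
// by the FusionOperation traits queried below.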
template<class Gemm>
struct HostCollectiveEpilogue {
// Fusion types are potentially void if the fusion is not supported;
// this helper avoids constructing a HostTensor with a void element type.
template <typename T, typename U = uint8_t>
using non_void_t = cute::conditional_t<cute::is_void_v<T>, U, T>;
using ScheduleType = typename Gemm::GemmKernel::CollectiveMainloop::DispatchPolicy::Schedule;
using kernel = typename Gemm::GemmKernel;
using Epilogue = typename kernel::CollectiveEpilogue;
static_assert(IsDefaultEpilogue<Epilogue>::value == false, "Default Epilogue is not supported");
using ElementD = typename kernel::ElementD;
using StrideD = typename kernel::StrideD;
using ElementC = non_void_t<typename kernel::ElementC, ElementD>;
using StrideC = typename kernel::StrideC;
static_assert(rank(StrideC{}) == 3, "StrideCD must be rank-3: [M, N, L]");
static_assert(rank(StrideD{}) == 3, "StrideCD must be rank-3: [M, N, L]");
static_assert(is_row_or_col_major<StrideC>(),
"ERROR : C Layout is neither Row nor Column Major");
static_assert(is_row_or_col_major<StrideD>(),
"ERROR : D Layout is neither Row nor Column Major");
// Deduce Cutlass Layouts (RowMajor & ColumnMajor)
using LayoutTagC = cutlass::detail::StrideToLayoutTagC_t<StrideC>;
using LayoutTagD = cutlass::detail::StrideToLayoutTagC_t<StrideD>;
using LayoutTagScalar = cutlass::layout::PackedVectorLayout; // scalars are size-1 vectors
using LayoutTagVector = cutlass::layout::PackedVectorLayout;
using ElementAccumulator = typename kernel::ElementAccumulator;
using ElementScalingFactor = ElementAccumulator;
using ProblemShapeType = typename kernel::ProblemShape;
//
// FusionOperation derived types/queries
//
using EpiloguePolicy = typename Epilogue::DispatchPolicy;
static constexpr bool IsLegacy =
cute::is_same_v<
EpiloguePolicy,
cutlass::epilogue::Sm90TmaWarpSpecializedBiasElementwise<
EpiloguePolicy::StagesC, EpiloguePolicy::StagesD, EpiloguePolicy::FragmentSize>
>;
using FusionOp = typename Gemm::EpilogueOutputOp;
static_assert(cute::is_base_of_v<cutlass::epilogue::fusion::FusionOperation, FusionOp>);
using ElementCompute = typename FusionOp::ElementCompute;
using ElementScalar = typename FusionOp::ElementScalar;
using ElementBias = non_void_t<typename FusionOp::ElementBias>;
using ElementAux = non_void_t<typename FusionOp::ElementAux>;
using ElementAmax = non_void_t<typename FusionOp::ElementAmax>;
using LayoutTagAux = non_void_t<typename FusionOp::GmemLayoutTagAux, LayoutTagD>;
using ActivationFunctor = non_void_t<typename FusionOp::ActivationFn,
cutlass::epilogue::thread::Identity<ElementCompute>>;
static constexpr bool IsRowBiasEnabled = FusionOp::IsPerRowBiasSupported;
static constexpr bool IsDeBiasEnabled = FusionOp::IsDePerRowBiasSupported;
static constexpr bool IsPerRowScaleEnabled = FusionOp::IsPerRowScaleSupported;
static constexpr bool IsScaleFactorEnabled = FusionOp::IsScaleFactorSupported;
static constexpr bool IsAuxInEnabled = FusionOp::IsAuxInSupported;
static constexpr bool IsAuxOutEnabled = FusionOp::IsAuxOutSupported;
static constexpr bool IsAbsMaxEnabledD = FusionOp::IsAbsMaxSupported &&
(cute::is_same_v<ElementD, cutlass::float_e4m3_t> ||
cute::is_same_v<ElementD, cutlass::float_e5m2_t>);
static constexpr bool IsAbsMaxEnabledAux = IsAuxOutEnabled && FusionOp::IsAbsMaxSupported &&
(cute::is_same_v<ElementAux, cutlass::float_e4m3_t> ||
cute::is_same_v<ElementAux, cutlass::float_e5m2_t>);
using Arguments = typename Gemm::GemmKernel::EpilogueArguments;
/// Initialization
StrideC stride_c;
StrideD stride_d;
typename LayoutTagC::Stride stride_factor_C;
typename LayoutTagD::Stride stride_factor_D;
// Inputs
cutlass::HostTensor<ElementScalar, LayoutTagScalar> alpha;
cutlass::HostTensor<ElementScalar, LayoutTagScalar> beta;
cutlass::HostTensor<ElementScalar, LayoutTagScalar> scale_A;
cutlass::HostTensor<ElementScalar, LayoutTagScalar> scale_B;
cutlass::HostTensor<ElementScalar, LayoutTagScalar> scale_C;
cutlass::HostTensor<ElementScalar, LayoutTagScalar> scale_D;
cutlass::HostTensor<ElementScalar, LayoutTagScalar> scale_Aux;
cutlass::HostTensor<ElementBias , LayoutTagVector> bias;
cutlass::HostTensor<ElementC, LayoutTagC> tensor_C;
cutlass::HostTensor<ElementCompute, LayoutTagScalar> norm_constant;
// Outputs
cutlass::HostTensor<ElementAmax, LayoutTagScalar> abs_max_Aux;
cutlass::HostTensor<ElementAmax, LayoutTagScalar> abs_max_D;
cutlass::HostTensor<ElementAux , LayoutTagAux > tensor_Aux;
cutlass::gemm::TagToStrideC_t< LayoutTagAux > stride_Aux;
cutlass::HostTensor<ElementD, LayoutTagD> tensor_D;
cutlass::HostTensor<ElementD, LayoutTagD> reference_D;
// References
cutlass::HostTensor<ElementBias, LayoutTagVector> reference_dbias;
cutlass::HostTensor<ElementAux , LayoutTagAux > reference_Aux;
cutlass::HostTensor<ElementAmax, LayoutTagScalar> reference_abs_max_Aux;
cutlass::HostTensor<ElementAmax, LayoutTagScalar> reference_abs_max_D;
// Whether to use relative equality checks
CheckEquality check_relative_equality = CheckEquality::EXACT;
// Whether scalars are copied to device memory before kernel launch
ScalarLoc use_device_scalars = ScalarLoc::ON_HOST;
// If per-row scale is enabled and this is true, beta is passed as a host scalar instead of device vector
VectorBeta disable_vector_beta = VectorBeta::DISABLED;
// Random distribution with which to initialize the A/B/C/D/Aux scaling factors
cutlass::Distribution::Kind init_scale = cutlass::Distribution::Uniform;
// Random distribution with which to initialize the bias vector
cutlass::Distribution::Kind init_bias = cutlass::Distribution::Uniform;
cutlass::Distribution::Kind init_C;
uint64_t seed;
static constexpr uint64_t kDefaultSeed = 4096;
HostCollectiveEpilogue(
CheckEquality check_relative_equality_ = CheckEquality::EXACT,
ScalarLoc use_device_scalars_ = ScalarLoc::ON_HOST,
VectorBeta disable_vector_beta_ = VectorBeta::DISABLED,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_scale_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_bias_ = cutlass::Distribution::Uniform,
uint64_t seed_ = kDefaultSeed
): init_scale(init_scale_), init_bias(init_bias_),
init_C(init_C_), seed(seed_),
stride_factor_C(typename LayoutTagC::Stride()),
stride_factor_D(typename LayoutTagD::Stride()),
check_relative_equality(check_relative_equality_),
use_device_scalars(use_device_scalars_){ }
bool initialize(ProblemShapeType problem_size, ElementScalar alpha_=1.f, ElementScalar beta_=0.f) {
// Initialize Epilogue tensors
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
auto M = cute::size<0>(problem_shape_MNKL);
auto N = cute::size<1>(problem_shape_MNKL);
auto K = cute::size<2>(problem_shape_MNKL);
auto L = cute::size<3>(problem_shape_MNKL);
stride_c = cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(M, N, L));
stride_d = cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(M, N, L));
// 2.x host tensor does not natively contain a batch stride or coord, so we spoof it by folding it into the outer mode
auto c_coord = cutlass::make_Coord(M * L, N);
tensor_C.resize(c_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagC>::layout_factory(c_coord, stride_factor_C));
tensor_D.resize(c_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagD>::layout_factory(c_coord, stride_factor_D));
reference_D.resize(c_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagD>::layout_factory(c_coord, stride_factor_D), false);
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2020));
tensor_C.host_view().at({0, 0}) = ElementC(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_C.sync_device();
tensor_D.sync_device();
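// Allocate and initialize the optional fusion operands; each block below is compiled
// only when the corresponding FusionOp trait is enabled.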
auto scalar_coord = cutlass::make_Coord(1);
auto col_vector_coord = cutlass::make_Coord(M);
auto row_vector_coord = cutlass::make_Coord(N);
if constexpr (IsPerRowScaleEnabled) {
alpha.resize(col_vector_coord);
EXPECT_TRUE(initialize_tensor(alpha.host_view(), init_scale, seed + 2023));
if (disable_vector_beta == VectorBeta::DISABLED) {
beta.resize(scalar_coord, false);
cutlass::reference::host::TensorFill(beta.host_view(), beta_);
}
else {
beta.resize(col_vector_coord);
EXPECT_TRUE(initialize_tensor(beta.host_view(), init_scale, seed + 2024));
}
}
else {
alpha.resize(scalar_coord, (use_device_scalars == ScalarLoc::ON_DEVICE));
beta.resize(scalar_coord, (use_device_scalars == ScalarLoc::ON_DEVICE));
cutlass::reference::host::TensorFill(alpha.host_view(), alpha_);
cutlass::reference::host::TensorFill(beta.host_view(), beta_);
}
alpha.sync_device();
beta.sync_device();
if constexpr (IsScaleFactorEnabled) {
scale_A.resize(scalar_coord, (use_device_scalars == ScalarLoc::ON_DEVICE));
scale_B.resize(scalar_coord, (use_device_scalars == ScalarLoc::ON_DEVICE));
scale_C.resize(scalar_coord, (use_device_scalars == ScalarLoc::ON_DEVICE));
scale_D.resize(scalar_coord, (use_device_scalars == ScalarLoc::ON_DEVICE));
EXPECT_TRUE(initialize_tensor(scale_A.host_view(), init_scale, seed + 2023));
EXPECT_TRUE(initialize_tensor(scale_B.host_view(), init_scale, seed + 2024));
EXPECT_TRUE(initialize_tensor(scale_C.host_view(), init_scale, seed + 2025));
EXPECT_TRUE(initialize_tensor(scale_D.host_view(), init_scale, seed + 2026));
scale_A.sync_device();
scale_B.sync_device();
scale_C.sync_device();
scale_D.sync_device();
}
if constexpr (
IsRowBiasEnabled
) {
bias.resize(IsRowBiasEnabled ? col_vector_coord : row_vector_coord);
EXPECT_TRUE(initialize_tensor(bias.host_view(), init_bias, seed + 2023));
bias.sync_device();
}
if constexpr (IsDeBiasEnabled) {
bias.resize(col_vector_coord);
reference_dbias.resize(col_vector_coord);
cutlass::reference::host::TensorFill(bias.host_view(), ElementBias(0));
cutlass::reference::host::TensorFill(reference_dbias.host_view(), ElementBias(0));
bias.sync_device();
}
if constexpr (IsAbsMaxEnabledD) {
abs_max_D.resize(scalar_coord);
// ensure in-place device reductions perform their own initialization
cutlass::reference::host::TensorFill(abs_max_D.host_view(),
CUTLASS_STL_NAMESPACE::numeric_limits<ElementAmax>::max());
abs_max_D.sync_device();
reference_abs_max_D.resize(scalar_coord);
cutlass::reference::host::TensorFill(reference_abs_max_D.host_view(), ElementAmax(0));
}
if constexpr (IsAuxInEnabled) {
auto aux_coord = cutlass::make_Coord(M * L, N);
auto aux_layout = cutlass::layout::Affine2Layout_Factory<LayoutTagD>::layout_factory(aux_coord, typename LayoutTagAux::Stride{});
tensor_Aux.resize(aux_coord, aux_layout);
EXPECT_TRUE(initialize_tensor(tensor_Aux.host_view(), init_C, seed + 2023));
tensor_Aux.sync_device();
stride_Aux = cutlass::make_cute_packed_stride(cutlass::gemm::TagToStrideC_t<LayoutTagAux>{}, cute::make_shape(M, N, L));
}
if constexpr (IsAuxOutEnabled) {
auto aux_coord = cutlass::make_Coord(M * L, N);
auto aux_layout = cutlass::layout::Affine2Layout_Factory<LayoutTagD>::layout_factory(aux_coord, typename LayoutTagAux::Stride{});
tensor_Aux.resize(aux_coord, aux_layout);
reference_Aux.resize(aux_coord, aux_layout, false);
tensor_Aux.sync_device();
stride_Aux = cutlass::make_cute_packed_stride(cutlass::gemm::TagToStrideC_t<LayoutTagAux>{}, cute::make_shape(M, N, L));
if constexpr (IsScaleFactorEnabled) {
scale_Aux.resize(scalar_coord, (use_device_scalars == ScalarLoc::ON_DEVICE));
EXPECT_TRUE(initialize_tensor(scale_Aux.host_view(), init_scale, seed + 2027));
scale_Aux.sync_device();
}
if constexpr (IsAbsMaxEnabledAux) {
abs_max_Aux.resize(scalar_coord);
// ensure in-place device reductions perform their own initialization
cutlass::reference::host::TensorFill(abs_max_Aux.host_view(),
CUTLASS_STL_NAMESPACE::numeric_limits<ElementAmax>::max());
abs_max_Aux.sync_device();
reference_abs_max_Aux.resize(scalar_coord);
cutlass::reference::host::TensorFill(reference_abs_max_Aux.host_view(), ElementAmax(0));
}
}
return true;
}
template <
class Element,
class Layout
>
bool equality_check(
cutlass::TensorView<Element, Layout> const& lhs,
cutlass::TensorView<Element, Layout> const& rhs) const {
// Factors used for calculating relative equality. CUTLASS's relative-equality
// checks in include/cutlass/relatively_equal.h are inspired by
// https://floating-point-gui.de/errors/comparison/. This reference suggests using
// the minimum normal value of a given type as the nonzero_floor.
Element epsilon(static_cast<Element>(0.1f));
Element nonzero_floor(std::numeric_limits<Element>::min());
if constexpr (!cutlass::is_complex<Element>::value) {
if (check_relative_equality == CheckEquality::RELATIVE) {
return cutlass::reference::host::TensorRelativelyEquals(
lhs, rhs, epsilon, nonzero_floor);
}
else {
return cutlass::reference::host::TensorEquals(lhs, rhs);
}
}
else {
return cutlass::reference::host::TensorEquals(lhs, rhs);
}
}
bool compare_reference(
cute::Shape<int,int,int,int> problem_shape_MNKL,
ElementScalar alpha,
ElementScalar beta) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
if (tensor_D.size() > 1) {
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
}
if (reference_D.size() > 1) {
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
}
bool passed = equality_check(reference_D.host_view(), tensor_D.host_view());
if (!passed) {
std::cout << "D is incorrect" << std::endl;
}
if constexpr (IsAbsMaxEnabledD) {
abs_max_D.sync_host();
passed &= equality_check(reference_abs_max_D.host_view(), abs_max_D.host_view());
}
if constexpr (IsDeBiasEnabled) {
bias.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(bias.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_dbias.host_view()), 0);
passed &= equality_check(reference_dbias.host_view(), bias.host_view());
}
if constexpr (IsAuxOutEnabled) {
tensor_Aux.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Aux.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_Aux.host_view()), 0);
passed &= equality_check(reference_Aux.host_view(), tensor_Aux.host_view());
if (!passed) {
std::cout << "Aux is incorrect" << std::endl;
}
if constexpr (IsAbsMaxEnabledAux) {
abs_max_Aux.sync_host();
bool tmp = equality_check(reference_abs_max_Aux.host_view(), abs_max_Aux.host_view());
if (!tmp) {
std::cout << "AbsMax of Aux is incorrect" << std::endl;
}
passed &= tmp;
}
}
return passed;
}
void print_tensors(std::ofstream& file) {
auto coord_0 = cutlass::make_Coord(0);
if constexpr (IsScaleFactorEnabled) {
file
<< ", scale_a: " << scale_A.at(coord_0)
<< ", scale_b: " << scale_B.at(coord_0)
<< ", scale_c: " << scale_C.at(coord_0);
}
if constexpr (IsPerRowScaleEnabled) {
file << "\n\nvalpha = \n" << alpha.host_view();
file << "\n\nvbeta = \n" << beta.host_view();
} else {
file
<< ", alpha: " << alpha.at(coord_0) << ", beta: " << beta.at(coord_0);
}
file << "\n\n";
if constexpr (IsAbsMaxEnabledD) {
file << "scale_d: " << float(scale_D.at(coord_0));
file << "\nReference abs_max_D :";
file << " " << float(reference_abs_max_D.at(coord_0));
file << "\nComputed abs_max_D :";
file << " " << float(abs_max_D.at(coord_0));
file << "\n\n";
}
if constexpr (IsAbsMaxEnabledAux) {
file << "scale_aux: " << float(scale_Aux.at(coord_0));
file << "\nReference abs_max_Aux :";
file << " " << float(reference_abs_max_Aux.at(coord_0));
file << "\nComputed abs_max_Aux :";
file << " " << float(abs_max_Aux.at(coord_0));
file << "\n\n";
}
if constexpr (IsRowBiasEnabled) {
file << "\n\nBias = \n" << bias.host_view();
}
if constexpr (IsAuxInEnabled) {
file << "\n\nAux Input = \n" << tensor_Aux.host_view();
}
if constexpr (IsDeBiasEnabled) {
file << "\n\nReference dBias = \n" << reference_dbias.host_view();
file << "\n\nComputed dBias = \n" << bias.host_view();
}
if constexpr (IsAuxOutEnabled) {
file
<< "\n\nReference Aux =\n" << reference_Aux.host_view()
<< "\n\nComputed Aux =\n" << tensor_Aux.host_view();
}
file
<< "\nC =\n" << tensor_C.host_view()
<< "\n\nReference =\n" << reference_D.host_view()
<< "\n\nComputed =\n" << tensor_D.host_view();
}
Arguments to_args(ProblemShapeType problem_size) {
auto coord_0 = cutlass::make_Coord(0);
Arguments arguments =
{
{},
tensor_C.device_data(), stride_c, tensor_D.device_data(), stride_d
};
auto &fusion_args = arguments.thread;
if constexpr (IsLegacy) {
arguments.thread = {
alpha.at(coord_0),
beta.at(coord_0),
alpha.device_data(),
beta.device_data()
};
arguments.ptr_Bias = bias.device_data();
arguments.ptr_T = tensor_Aux.device_data();
}
else {
fusion_args.alpha = alpha.at(coord_0);
fusion_args.beta = beta.at(coord_0);
fusion_args.alpha_ptr = alpha.device_data();
fusion_args.beta_ptr = beta.device_data(); // if disable_vector_beta is true this is nullptr
if constexpr (IsScaleFactorEnabled) {
fusion_args.scale_a = scale_A.at(coord_0);
fusion_args.scale_b = scale_B.at(coord_0);
fusion_args.scale_c = scale_C.at(coord_0);
fusion_args.scale_d = scale_D.at(coord_0);
fusion_args.scale_a_ptr = scale_A.device_data();
fusion_args.scale_b_ptr = scale_B.device_data();
fusion_args.scale_c_ptr = scale_C.device_data();
fusion_args.scale_d_ptr = scale_D.device_data();
}
if constexpr (
IsRowBiasEnabled
) {
fusion_args.bias_ptr = bias.device_data();
}
if constexpr (IsDeBiasEnabled) {
fusion_args.dbias_ptr = bias.device_data();
}
// Example of how to set kernel activation arguments.
// See ActivationFunctor::Arguments in activation.h for the definition;
// if no Arguments type exists, fusion_args.activation is empty.
if constexpr (cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::ScaledGELU_taylor<ElementCompute>>) {
fusion_args.activation.scale = ElementCompute(1);
}
// Treat Clamp as ReLU
if constexpr (cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::Clamp<ElementCompute>>) {
fusion_args.activation.lower_bound = 0;
fusion_args.activation.upper_bound = std::numeric_limits<ElementCompute>::max();
}
if constexpr (IsAbsMaxEnabledD) {
fusion_args.amax_D_ptr = abs_max_D.device_data();
}
if constexpr (IsAuxInEnabled) {
fusion_args.aux_ptr = tensor_Aux.device_data();
fusion_args.dAux = stride_Aux;
}
if constexpr (IsAuxOutEnabled) {
fusion_args.aux_ptr = tensor_Aux.device_data();
fusion_args.dAux = stride_Aux;
if constexpr (IsScaleFactorEnabled) {
fusion_args.scale_aux = scale_Aux.at(coord_0);
fusion_args.scale_aux_ptr = scale_Aux.device_data();
}
if constexpr (IsAbsMaxEnabledAux) {
fusion_args.amax_aux_ptr = abs_max_Aux.device_data();
}
}
}
return arguments;
}
auto to_host_args(ProblemShapeType problem_size) {
using namespace cute;
//
// Allocate the GEMM workspace
//
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
auto M = cute::get<0>(problem_shape_MNKL);
auto N = cute::get<1>(problem_shape_MNKL);
auto K = cute::get<2>(problem_shape_MNKL);
auto L = cute::get<3>(problem_shape_MNKL);
auto coord_0 = cutlass::make_Coord(0);
auto C = cute::make_tensor(detail::make_iterator(tensor_C.host_data()),
cute::make_layout(cute::make_shape(M, N, L), stride_c));
auto D = cute::make_tensor(detail::make_iterator(reference_D.host_data()),
cute::make_layout(cute::make_shape(M, N, L), stride_d));
auto Bias = cute::make_tensor(detail::make_iterator(IsDeBiasEnabled ? reference_dbias.host_data() : bias.host_data()),
cute::make_layout(cute::make_shape(IsRowBiasEnabled ? M : N)));
auto Aux = cute::make_tensor(detail::make_iterator(IsAuxInEnabled ? tensor_Aux.host_data() : reference_Aux.host_data()),
cute::make_layout(cute::make_shape(M, N, L), stride_Aux));
auto Valpha = cute::make_tensor(detail::make_iterator(alpha.host_data()),
cute::make_layout(cute::make_shape(M, cute::_1{})));
auto Vbeta = cute::make_tensor(detail::make_iterator(beta.host_data()),
cute::make_layout(cute::make_shape(M, cute::_1{})));
cutlass::reference::host::GettEpilogueParams<
ElementScalar,
ElementScalar,
ElementAccumulator,
ElementCompute,
decltype(C),
decltype(D),
decltype(Bias),
decltype(Aux),
decltype(Valpha),
decltype(Vbeta),
ActivationFunctor,
cutlass::plus<ElementCompute>
> epilogue_params{};
epilogue_params.C = C;
epilogue_params.D = D;
epilogue_params.alpha = alpha.at(coord_0);
epilogue_params.beta = beta.at(coord_0);
if constexpr (IsScaleFactorEnabled) {
epilogue_params.scale_a = scale_A.at(coord_0);
epilogue_params.scale_b = scale_B.at(coord_0);
epilogue_params.scale_c = scale_C.at(coord_0);
epilogue_params.scale_d = scale_D.at(coord_0);
}
if constexpr (IsRowBiasEnabled
or IsDeBiasEnabled)
{
epilogue_params.Bias = Bias;
}
if constexpr (IsAbsMaxEnabledD) {
epilogue_params.abs_max_D = reference_abs_max_D.host_data();
}
if constexpr (IsAuxInEnabled) {
epilogue_params.Aux = Aux;
}
if constexpr (IsAuxOutEnabled) {
epilogue_params.Aux = Aux;
if constexpr (IsScaleFactorEnabled) {
epilogue_params.scale_aux = scale_Aux.at(coord_0);
}
if constexpr (IsAbsMaxEnabledAux) {
epilogue_params.abs_max_Aux = reference_abs_max_Aux.host_data();
}
}
if constexpr (IsPerRowScaleEnabled) {
epilogue_params.Valpha = Valpha;
if (disable_vector_beta == VectorBeta::ENABLED) {
epilogue_params.Vbeta = Vbeta;
}
}
return epilogue_params;
}
};
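// TestbedImpl ties a HostCollectiveMainloop together with the matching host epilogue
// helper: it initializes operands, launches the device GEMM, runs the host reference
// (cutlass::reference::host::Gemm3x) and compares the two results.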
template <
typename Gemm,
template <class T> class ActivationFunctor_ = cutlass::epilogue::thread::Identity,
bool force_legacy_epilogue = false,
typename ElementA = typename Gemm::GemmKernel::ElementA,
typename ElementB = typename Gemm::GemmKernel::ElementB
>
struct TestbedImpl {
// Kernel data types
using ScheduleType = typename Gemm::GemmKernel::CollectiveMainloop::DispatchPolicy::Schedule;
// All Collective MMA operands are defined by HostCollectiveMainloopType based on the schedule type
using HostCollectiveMainloopType = HostCollectiveMainloop<ScheduleType, Gemm, ElementA, ElementB>;
using CollectiveEpilogue = cute::conditional_t<IsDefaultEpilogue<typename Gemm::GemmKernel::CollectiveEpilogue>::value || force_legacy_epilogue,
HostCollectiveDefaultEpilogue<Gemm>,
HostCollectiveEpilogue<Gemm>>;
using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;
using ElementAccumulator = typename Gemm::GemmKernel::ElementAccumulator;
using ElementCompute = typename ElementComputeType<Gemm, ElementAccumulator>::Type;
using ElementScalar = typename ElementScalarType<Gemm, ElementCompute>::Type;
using LayoutTagA = typename HostCollectiveMainloopType::LayoutTagA;
using LayoutTagB = typename HostCollectiveMainloopType::LayoutTagB;
using LayoutTagC = typename CollectiveEpilogue::LayoutTagC;
using LayoutTagD = typename CollectiveEpilogue::LayoutTagD;
uint32_t sm_count;
// Used to force multi-wave tests for persistent kernel schedules
constexpr static int MaxSmCount = 16;
static constexpr uint64_t kDefaultSeed = 4096;
static constexpr uint32_t mma_promotion_interval = 4;
using RasterOrderOptions = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90::RasterOrderOptions;
using DecompositionMode = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90StreamKParams::DecompositionMode;
HostCollectiveMainloopType collective_mma_inputs;
CollectiveEpilogue collective_epilogue;
//
// Methods
//
TestbedImpl(
CheckEquality check_relative_equality_ = CheckEquality::EXACT,
ScalarLoc use_device_scalars_ = ScalarLoc::ON_HOST,
VectorBeta disable_vector_beta_ = VectorBeta::DISABLED,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_scale_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_bias_ = cutlass::Distribution::Uniform,
uint64_t seed_ = kDefaultSeed
): collective_mma_inputs(HostCollectiveMainloopType(check_relative_equality_, init_A_, init_B_, seed_)),
collective_epilogue(CollectiveEpilogue(check_relative_equality_, use_device_scalars_, disable_vector_beta_, init_C_, init_scale_, init_bias_, seed_)) { }
TestbedImpl(
typename LayoutTagA::Stride stride_factor_A_,
typename LayoutTagB::Stride stride_factor_B_,
typename LayoutTagC::Stride stride_factor_C_,
typename LayoutTagD::Stride stride_factor_D_,
CheckEquality check_relative_equality_ = CheckEquality::EXACT,
ScalarLoc use_device_scalars_ = ScalarLoc::ON_HOST,
VectorBeta disable_vector_beta_ = VectorBeta::DISABLED,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_scale_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_bias_ = cutlass::Distribution::Uniform,
uint64_t seed_ = kDefaultSeed
): collective_mma_inputs(HostCollectiveMainloopType(check_relative_equality_, stride_factor_A_, stride_factor_B_, init_A_, init_B_, seed_)),
collective_epilogue(CollectiveEpilogue(check_relative_equality_, use_device_scalars_, disable_vector_beta_, init_C_, init_scale_, init_bias_, seed_)) { }
/// Initializes data structures
bool initialize(ProblemShapeType problem_size, ElementScalar alpha_=1.f, ElementScalar beta_=0.f) {
collective_mma_inputs.initialize(problem_size);
collective_epilogue.initialize(problem_size, alpha_, beta_);
return true;
}
/// Compares the host-computed reference with the device result and dumps the tensors to a file if incorrect
bool compare_reference(
cute::Shape<int,int,int,int> problem_shape_MNKL,
ElementScalar alpha,
ElementScalar beta)
{
auto [M, N, K, L] = problem_shape_MNKL;
bool passed = collective_mma_inputs.compare_reference(problem_shape_MNKL);
passed &= collective_epilogue.compare_reference(problem_shape_MNKL, alpha, beta);
EXPECT_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_Gemm_device_"
<< M << "x" << N << "x" << K << "x" << L << "_"
<< cute::get<0>(typename Gemm::GemmKernel::TileShape{}) << "_"
<< cute::get<1>(typename Gemm::GemmKernel::TileShape{}) << "_"
<< cute::get<2>(typename Gemm::GemmKernel::TileShape{}) << ".txt";
std::ofstream file(fname.str());
file
<< "problem: " << ' ' << M << "x" << N << "x" << K << ", Batch count = " << L
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
collective_mma_inputs.print_tensors(file);
collective_epilogue.print_tensors(file);
}
return passed;
}
/// Verifies the device result against the host reference GEMM
bool verify(
ProblemShapeType problem_size,
ElementScalar alpha,
ElementScalar beta)
{
using namespace cute;
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
auto mainloop_params = collective_mma_inputs.to_host_args(problem_size);
auto epilogue_params = collective_epilogue.to_host_args(problem_size);
cutlass::reference::host::Gemm3x(mainloop_params, epilogue_params);
bool passed = compare_reference(problem_shape_MNKL, alpha, beta);
return passed;
}
/// Determine if the CUDA device is sufficient to run the kernel
bool sufficient() {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = static_cast<size_t>(Gemm::GemmKernel::SharedStorageSize);
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
cudaDeviceProp properties;
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
this->sm_count = properties.multiProcessorCount;
if (properties.sharedMemPerBlockOptin < smem_size) {
printf("failed due to smem_size\n");
printf("hardware smem_size: %d, required smem_size: %d\n\n", int(properties.sharedMemPerBlockOptin), int(smem_size));
return false;
}
return true;
}
bool profile(
ProblemShapeType problem_size,
int iterations,
Gemm& gemm_op,
typename Gemm::Arguments& arguments,
cutlass::device_memory::allocation<uint8_t>& workspace) {
int M = cute::size<0>(problem_size);
int N = cute::size<1>(problem_size);
int K = cute::size<2>(problem_size);
int L = 1;
if constexpr(cute::rank(ProblemShapeType{}) == 4) {
L = cute::size<3>(problem_size);
}
cutlass::Status status;
//
// Run the GEMM
//
cudaError_t result;
for (int iter = 0; iter < iterations; ++iter) {
status = gemm_op(arguments, workspace.get());
if (status != cutlass::Status::kSuccess) {
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
return false;
}
}
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
EXPECT_EQ(result, cudaSuccess) << "Error at Kernel Sync.";
return false;
}
return true;
}
/// Executes one test
bool run(
ProblemShapeType problem_size,
ElementScalar alpha = ElementScalar(1),
ElementScalar beta = ElementScalar(0),
bool profiling = false,
detail::Iterations iterations = detail::Iterations{},
RasterOrderOptions raster_order = RasterOrderOptions::Heuristic,
detail::MaxSwizzleSize max_swizzle = detail::MaxSwizzleSize{},
detail::Splits splits = detail::Splits{},
DecompositionMode decomposition_mode = DecompositionMode::Heuristic
)
{
// Fail test if insufficient CUDA device
if (!sufficient()) {
std::cout << "Test failed due to insufficient CUDA device." << std::endl;
return false;
}
if (!this->initialize(problem_size, alpha, beta)) {
std::cerr << "Initialization failed \n";
return false;
}
//
// Initialize the GEMM operator
//
typename Gemm::Arguments arguments;
cutlass::KernelHardwareInfo hw_info;
hw_info.device_id = 0;
if (not profiling) {
this->sm_count = std::min(MaxSmCount, cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id));
hw_info.sm_count = this->sm_count;
}
else {
this->sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
hw_info.sm_count = this->sm_count;
}
typename Gemm::GemmKernel::TileScheduler::Arguments scheduler_args;
if constexpr (cute::is_same_v<typename Gemm::GemmKernel::TileSchedulerTag, cutlass::gemm::StreamKScheduler>) {
scheduler_args = { static_cast<int>(splits), static_cast<int>(max_swizzle), raster_order, decomposition_mode };
}
else {
scheduler_args = { static_cast<int>(max_swizzle), raster_order };
}
typename HostCollectiveMainloopType::Arguments mainloop_args;
mainloop_args = collective_mma_inputs.to_args();
arguments =
{
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size,
mainloop_args,
collective_epilogue.to_args(problem_size),
hw_info,
scheduler_args
};
Gemm gemm_op;
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm_op.can_implement(arguments);
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return true;
}
//
// Run the GEMM
//
if (profiling) {
return profile(problem_size, static_cast<int>(iterations), gemm_op, arguments, workspace);
}
else {
cudaError_t result;
status = gemm_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
status = gemm_op.run();
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
EXPECT_EQ(result, cudaSuccess) << "Error at Kernel Sync.";
return false;
}
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, alpha, beta);
if (!passed) {
std::cout << "Error : Failed : with alpha: " << alpha << ", beta: " << beta
<< "\n";
}
return passed;
}
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Gemm,
template <class T> class ActivationFunctor = cutlass::epilogue::thread::Identity,
bool force_legacy_epilogue = false,
typename ElementA = typename Gemm::GemmKernel::ElementA,
typename ElementB = typename Gemm::GemmKernel::ElementB
>
struct Testbed3x {
using TestBedImpl = typename detail::TestbedImpl<
Gemm,
ActivationFunctor,
force_legacy_epilogue,
ElementA,
ElementB
>;
using Kernel = typename Gemm::GemmKernel;
using Epilogue = typename Gemm::GemmKernel::CollectiveEpilogue;
using ElementAccumulator = typename TestBedImpl::ElementAccumulator;
using ElementCompute = typename TestBedImpl::ElementCompute;
using ElementScalar = typename TestBedImpl::ElementScalar;
using RasterOrderOptions = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90::RasterOrderOptions;
using DecompositionMode = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90StreamKParams::DecompositionMode;
// Detail Implementation
TestBedImpl impl_;
//
// Methods
//
Testbed3x(
CheckEquality check_relative_equality_ = CheckEquality::EXACT,
ScalarLoc use_device_scalars_ = ScalarLoc::ON_DEVICE,
VectorBeta disable_vector_beta_ = VectorBeta::DISABLED,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_scale_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_bias_ = cutlass::Distribution::Uniform,
uint64_t seed_ = TestBedImpl::kDefaultSeed)
: impl_(check_relative_equality_, use_device_scalars_, disable_vector_beta_, init_A_, init_B_, init_C_, init_scale_, init_bias_, seed_) {}
/// Executes one test
bool run(
typename TestBedImpl::ProblemShapeType problem_size,
ElementScalar alpha = ElementScalar(1),
ElementScalar beta = ElementScalar(0),
RasterOrderOptions raster_order = RasterOrderOptions::Heuristic,
detail::MaxSwizzleSize max_swizzle = detail::MaxSwizzleSize{},
detail::Splits splits = detail::Splits{},
DecompositionMode decomposition_mode = DecompositionMode::Heuristic,
bool profiling = false,
detail::Iterations iterations = detail::Iterations{}
)
{
return impl_.run(
problem_size, alpha, beta, profiling, iterations, raster_order, max_swizzle, splits, decomposition_mode
);
}
};
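//
// Minimal usage sketch. `MyGemm` is a placeholder for a fully specialized
// cutlass::gemm::device::GemmUniversalAdapter type defined by the test, and a
// rank-4 (batched) problem shape is assumed:
//
//   Testbed3x<MyGemm> testbed(CheckEquality::RELATIVE);
//   bool passed = testbed.run({256, 256, 128, 1}, /*alpha=*/1.0f, /*beta=*/0.5f);
//   EXPECT_TRUE(passed);
//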
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
bool TestGemmPerf3x(int iterations = 20) {
using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;
using ElementAccumulator = typename Gemm::GemmKernel::ElementAccumulator;
using ElementScalar = ElementAccumulator;
bool passed = true;
using DecompositionMode = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90StreamKParams::DecompositionMode;
using RasterOrderOptions = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90::RasterOrderOptions;
std::vector<int> problem_size_m = { 4608 };
std::vector<int> problem_size_n = { 4608 };
std::vector<int> problem_size_k = { 8192 };
Testbed3x<Gemm> testbed;
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
ProblemShapeType problem_size;
if constexpr (cute::rank(ProblemShapeType{}) == 4) {
problem_size = ProblemShapeType{m, n, k, /* l */ 1};
}
else {
problem_size = ProblemShapeType{m, n, k};
}
passed = testbed.run(
problem_size,
cutlass::from_real<ElementScalar>(1),
cutlass::from_real<ElementScalar>(0),
RasterOrderOptions{}, detail::MaxSwizzleSize(1), detail::Splits{1}, DecompositionMode{},
true, // profiling
detail::Iterations{iterations});
if (!passed) {
return false;
}
}
}
}
return true;
}
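// Sweeps a small set of problem sizes, raster orders, swizzle sizes and, for stream-K
// schedulers, decomposition modes and split counts; returns false on the first failing
// combination.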
template <
typename Gemm,
template <class T> class ActivationFunctor = cutlass::epilogue::thread::Identity
>
bool TestAll(double alpha = 1.0, double beta = 0.0, CheckEquality check_relative_equality = CheckEquality::RELATIVE) {
using ElementScalar = typename Gemm::EpilogueOutputOp::ElementScalar;
using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;
Testbed3x<Gemm, ActivationFunctor> testbed(check_relative_equality, ScalarLoc::ON_HOST, VectorBeta::DISABLED);
int max_alignment = std::max(Gemm::kAlignmentA, Gemm::kAlignmentB);
std::vector<int> problem_size_m = {max_alignment, 512 - 3 * max_alignment};
std::vector<int> problem_size_n = {max_alignment, 512 - 2 * max_alignment};
if constexpr (cute::is_same_v<typename Gemm::GemmKernel::DispatchPolicy::Schedule,
cutlass::gemm::KernelTmaWarpSpecializedPingpong>) {
problem_size_m.push_back(768);
problem_size_n.push_back(768);
}
constexpr int Stages = Gemm::GemmKernel::DispatchPolicy::Stages;
constexpr int TileShapeK = cute::size<2>(typename Gemm::GemmKernel::TileShape{});
std::vector<int> problem_size_k = {max_alignment, TileShapeK * (Stages + 1) - max_alignment};
using DecompositionMode = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90StreamKParams::DecompositionMode;
std::vector<DecompositionMode> decomposition_modes = {DecompositionMode::Heuristic};
std::vector problem_splits = {detail::Splits{1}};
static constexpr bool UsesStreamKScheduler = cute::is_same_v<typename Gemm::GemmKernel::TileSchedulerTag, cutlass::gemm::StreamKScheduler>;
if constexpr (UsesStreamKScheduler) {
problem_splits.push_back(detail::Splits{2});
problem_splits.push_back(detail::Splits{3});
decomposition_modes.push_back(DecompositionMode::DataParallel);
decomposition_modes.push_back(DecompositionMode::SplitK);
decomposition_modes.push_back(DecompositionMode::StreamK);
// Use larger K sizes for stream-K tests
static constexpr int min_tiles_per_sk_unit = cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90StreamKParams::min_iters_per_sk_unit_;
problem_size_k = {TileShapeK * min_tiles_per_sk_unit, TileShapeK * 3 * min_tiles_per_sk_unit - max_alignment};
}
using RasterOrderOptions = typename cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90::RasterOrderOptions;
std::vector<RasterOrderOptions> raster_orders = {RasterOrderOptions::AlongM, RasterOrderOptions::AlongN};
std::vector max_swizzle_sizes{detail::MaxSwizzleSize{1}, detail::MaxSwizzleSize{4}};
bool passed = true;
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
for (auto raster_order : raster_orders) {
for (auto max_swizzle_size : max_swizzle_sizes) {
for (DecompositionMode decomp_mode : decomposition_modes) {
std::vector problem_splits = {detail::Splits{1}};
if (decomp_mode == DecompositionMode::Heuristic || decomp_mode == DecompositionMode::SplitK) {
auto max_splits = (k + TileShapeK - 1) / TileShapeK;
if (max_splits > 2) {
problem_splits.push_back(detail::Splits{2});
}
if (max_splits > 3) {
problem_splits.push_back(detail::Splits{3});
}
problem_splits.push_back(detail::Splits{max_splits});
// Test the case in which we ask for more splits than there are K tiles in the GEMM. In this
// case, split-K will fall back to a splitting factor of `max_splits`.
problem_splits.push_back(detail::Splits{max_splits + 1});
}
for (auto splits : problem_splits) {
ProblemShapeType problem_size;
if constexpr (cute::rank(ProblemShapeType{}) == 4) {
problem_size = ProblemShapeType{m, n, k, /* l */ 1};
}
else {
problem_size = ProblemShapeType{m, n, k};
}
passed = testbed.run(
problem_size,
cutlass::from_real<ElementScalar>(alpha),
cutlass::from_real<ElementScalar>(beta),
raster_order,
max_swizzle_size,
splits,
decomp_mode
);
if (!passed) {
std::cout << __FILE__ << ':' << __LINE__ << " : GEMM MNK " << m << " " << n << " " << k << " FAILED.\n";
return false;
}
} // splits
} // decomposition_mode
} // max_swizzle_size
} // raster_order
} // k
} // n
} // m
// If batched GEMM is supported, run just one batched problem to save on test time
if constexpr (cute::rank(ProblemShapeType{}) == 4) {
auto problem_size = ProblemShapeType{256 + max_alignment, 256 + max_alignment, 160 + max_alignment, /* l */ 3};
passed = testbed.run(
problem_size,
cutlass::from_real<ElementScalar>(alpha),
cutlass::from_real<ElementScalar>(beta)
);
if (!passed) {
return false;
}
}
return passed;
}
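// Typical call site from a unit test (sketch; `MyGemm` stands in for a fully
// specialized GemmUniversalAdapter type):
//
//   TEST(SM90_Device_Gemm_Example, Basic) {
//     EXPECT_TRUE(test::gemm::device::TestAll<MyGemm>(1.0, 0.5));
//   }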
template <typename Gemm>
bool TestAllBiasElementwise(double alpha = 1.0, double beta = 0.0, CheckEquality check_relative_equality = CheckEquality::EXACT) {
return TestAll<Gemm>(alpha, beta, check_relative_equality);
}
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
// [end of file: test/unit/gemm/device/gemm_testbed_3x.hpp]
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"
#include "testbed_utils.h"
#include "testbed_universal.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
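// Host-side testbed for the CUTLASS 2.x device::Gemm API: allocates and fills A/B/C,
// runs the device kernel, and verifies the result against cutlass::reference::host::Gemm
// (applying a ReLU to the reference when Relu is true).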
template <typename Gemm, bool Relu = false>
struct Testbed {
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute;
/// Initialization
typename Gemm::LayoutA::Stride stride_factor_A;
typename Gemm::LayoutB::Stride stride_factor_B;
typename Gemm::LayoutC::Stride stride_factor_C;
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<typename Gemm::ElementA, typename Gemm::LayoutA> tensor_A;
cutlass::HostTensor<typename Gemm::ElementB, typename Gemm::LayoutB> tensor_B;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_C;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_D;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> reference_D;
//
// Methods
//
Testbed(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
stride_factor_A(typename Gemm::LayoutA::Stride()),
stride_factor_B(typename Gemm::LayoutB::Stride()),
stride_factor_C(typename Gemm::LayoutC::Stride()),
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
Testbed(
typename Gemm::LayoutA::Stride stride_factor_A_,
typename Gemm::LayoutB::Stride stride_factor_B_,
typename Gemm::LayoutC::Stride stride_factor_C_,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
stride_factor_A(stride_factor_A_),
stride_factor_B(stride_factor_B_),
stride_factor_C(stride_factor_C_),
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 1;
scope_min = -1;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::gemm::GemmCoord problem_size) {
//
// Allocate the GEMM workspace
//
tensor_A.resize(problem_size.mk(), cutlass::layout::Affine2Layout_Factory<typename Gemm::LayoutA>::layout_factory(problem_size.mk(), stride_factor_A));
tensor_B.resize(problem_size.kn(), cutlass::layout::Affine2Layout_Factory<typename Gemm::LayoutB>::layout_factory(problem_size.kn(), stride_factor_B));
tensor_C.resize(problem_size.mn(), cutlass::layout::Affine2Layout_Factory<typename Gemm::LayoutC>::layout_factory(problem_size.mn(), stride_factor_C));
tensor_D.resize(problem_size.mn(), cutlass::layout::Affine2Layout_Factory<typename Gemm::LayoutC>::layout_factory(problem_size.mn(), stride_factor_C));
reference_D.resize(problem_size.mn(), cutlass::layout::Affine2Layout_Factory<typename Gemm::LayoutC>::layout_factory(problem_size.mn(), stride_factor_C), false);
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
tensor_A.host_view().at({0, 0}) = typename Gemm::ElementA(1);
tensor_B.host_view().at({0, 0}) = typename Gemm::ElementB(1);
tensor_C.host_view().at(cutlass::make_Coord(0, 0)) = typename Gemm::ElementC(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
}
/// Compares the host-computed reference with the device result and dumps the tensors to a file if incorrect
bool compare_reference(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
if (tensor_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
if (reference_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view());
EXPECT_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_Gemm_device_"
<< problem_size.m() << "x"
<< problem_size.n() << "x"
<< problem_size.k() << "_"
<< Gemm::ThreadblockShape::kM << "x"
<< Gemm::ThreadblockShape::kN << "x"
<< Gemm::ThreadblockShape::kK << "_"
<< Gemm::WarpShape::kM << "x"
<< Gemm::WarpShape::kN << "x"
<< Gemm::WarpShape::kK << ".txt";
std::ofstream file(fname.str());
file
<< "problem: " << problem_size
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << tensor_A.host_view()
<< "\nB =\n" << tensor_B.host_view()
<< "\nC =\n" << tensor_C.host_view()
<< "\n\nReference =\n" << reference_D.host_view()
<< "\nComputed =\n" << tensor_D.host_view();
}
return passed;
}
/// Verifies the result against a host reference GEMM
bool verify(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
//
// Verify
//
cutlass::reference::host::Gemm<
typename Gemm::ElementA, typename Gemm::LayoutA,
typename Gemm::ElementB, typename Gemm::LayoutB,
typename Gemm::ElementC, typename Gemm::LayoutC, ElementCompute,
ElementAccumulator, typename Gemm::Operator>
reference_gemm;
reference_gemm(
problem_size,
alpha,
tensor_A.host_ref(),
tensor_B.host_ref(),
beta,
reference_D.host_ref(),
ElementAccumulator(0)
);
if (Relu) {
for (int i = 0; i < problem_size.m(); ++i) {
for (int j = 0; j < problem_size.n(); ++j) {
reference_D.at(cutlass::MatrixCoord(i, j)) =
((ElementCompute)reference_D.at(cutlass::MatrixCoord(i, j)) < (ElementCompute)0)
? (typename Gemm::ElementC)0
: reference_D.at(cutlass::MatrixCoord(i, j));
}
}
}
return compare_reference(problem_size, alpha, beta);
}
/// Determine if the CUDA device is sufficient to run the kernel
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmCoord problem_size,
int split_k_slices = 1,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0))
{
/*
std::cout << "\n-----------------------\n";
std::cout << "problem size: " << problem_size << "\n";
std::cout << "split_k_slices: " << split_k_slices << "\n";
std::cout << "alpha: " << alpha << "\n";
std::cout << "beta: " << beta << "\n";
std::cout << "-----------------------\n\n";
*/
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
this->initialize(problem_size);
//
// Initialize the GEMM operator
//
typename Gemm::Arguments arguments{
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D.device_ref(),
{alpha, beta},
split_k_slices
};
Gemm gemm_op;
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return true;
}
//
// Run the GEMM
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, alpha, beta);
if (!passed) {
std::cout << "Error with split_k_slices = " << split_k_slices << ", alpha: " << alpha << std::endl;
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm, bool Relu=false>
bool TestAllGemmBasic(
const typename Gemm::LayoutA::Stride& stride_factor_A = typename Gemm::LayoutA::Stride(),
const typename Gemm::LayoutB::Stride& stride_factor_B = typename Gemm::LayoutB::Stride(),
const typename Gemm::LayoutC::Stride& stride_factor_C = typename Gemm::LayoutC::Stride()) {
bool passed = true;
int const kMinimumOperandElementSize =
std::min(
int(cutlass::sizeof_bits<typename Gemm::ElementA>::value),
int(cutlass::sizeof_bits<typename Gemm::ElementB>::value));
int const kAlignment = cutlass::platform::is_same<
typename Gemm::OperatorClass,
cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize;
// int8_t gemm alignment constraints
int const kAlignmentM = cutlass::platform::is_same<typename Gemm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Gemm::ElementA, int8_t>::value &&
cutlass::platform::is_same<typename Gemm::LayoutA, cutlass::layout::ColumnMajor>::value ? 4 : kAlignment;
int const kAlignmentN = cutlass::platform::is_same<typename Gemm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Gemm::ElementB, int8_t>::value &&
cutlass::platform::is_same<typename Gemm::LayoutB, cutlass::layout::RowMajor>::value ? 4 : kAlignment;
int const kAlignmentK = cutlass::platform::is_same<typename Gemm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Gemm::ElementA, int8_t>::value &&
cutlass::platform::is_same<typename Gemm::ElementB, int8_t>::value &&
(cutlass::platform::is_same<typename Gemm::LayoutA, cutlass::layout::RowMajor>::value ||
cutlass::platform::is_same<typename Gemm::LayoutB, cutlass::layout::ColumnMajor>::value) ? 4 : kAlignment;
int problem_size_m[] = {kAlignmentM, 512 - 3 * kAlignmentM};
int problem_size_n[] = {kAlignmentN, 512 - 2 * kAlignmentN};
int problem_size_k[] = {
kAlignmentK, Gemm::ThreadblockShape::kK * (Gemm::kStages + 1) - kAlignmentK};
int split_k_slices[] = {
1, 2, 3
};
double problem_alpha[] = {
1
};
double problem_beta[] = {
2.0
};
Testbed<Gemm, Relu> testbed(stride_factor_A, stride_factor_B, stride_factor_C);
using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute;
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
for (int split_k : split_k_slices) {
if (!Gemm::kSplitKSerial && split_k > 1) {
continue;
}
if (split_k > 1 && k / Gemm::ThreadblockShape::kK < split_k) {
continue;
}
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
cutlass::gemm::GemmCoord problem_size(m, n, k);
passed = testbed.run(
problem_size,
split_k,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
if (!passed) {
return false;
}
}
}
}
}
}
}
return passed;
}
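// Illustrative usage sketch (not part of the testbed): a unit test would typically
// instantiate a device-level GEMM and hand it to TestAllGemmBasic. The "ExampleGemm"
// alias below is hypothetical and is shown only to make the calling convention concrete.
//
// using ExampleGemm = cutlass::gemm::device::Gemm<
// float, cutlass::layout::ColumnMajor, // ElementA, LayoutA
// float, cutlass::layout::ColumnMajor, // ElementB, LayoutB
// float, cutlass::layout::ColumnMajor>; // ElementC, LayoutC
//
// TEST(Device_Gemm_example, basic) {
// EXPECT_TRUE(test::gemm::device::TestAllGemmBasic<ExampleGemm>());
// }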
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm, bool Relu=false>
bool TestAllGemm(
const typename Gemm::LayoutA::Stride& stride_factor_A,
const typename Gemm::LayoutB::Stride& stride_factor_B = typename Gemm::LayoutB::Stride(),
const typename Gemm::LayoutC::Stride& stride_factor_C = typename Gemm::LayoutC::Stride())
{
// Test basic GEMM with non-default stride factors
return TestAllGemmBasic<Gemm, Relu>(stride_factor_A, stride_factor_B, stride_factor_C);
}
template <typename Gemm, bool Relu=false>
bool TestAllGemm()
{
#ifdef NDEBUG
// Non-debug builds also test basic GEMM with default stride factors
if (!TestAllGemmBasic<Gemm, Relu>()) {
return false;
}
#endif // NDEBUG
// Test universal GEMM
#if 0
// Define the universal kernel
using UniversalKernel = cutlass::gemm::kernel::GemmUniversal<
typename Gemm::GemmKernel::Mma, // Mma
typename Gemm::GemmKernel::Epilogue, // Epilogue
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<> // ThreadblockSwizzle
>;
#else
// Define the streamk universal kernel
using UniversalKernel = cutlass::gemm::kernel::GemmUniversalStreamk<
typename Gemm::GemmKernel::Mma, // Mma
typename Gemm::GemmKernel::Epilogue, // Epilogue
cutlass::gemm::threadblock::ThreadblockSwizzleStreamK // ThreadblockSwizzle
>;
#endif
// Define the universal adaptor
using UniversalGemm = cutlass::gemm::device::GemmUniversalAdapter<UniversalKernel>;
// Test universal GEMM
return TestAllGemmUniversal<UniversalGemm, Relu>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
bool TestGemmPerf(int iterations = 1) {
bool passed = true;
int problem_size_m[] = { 2048 };
int problem_size_n[] = { 4352 };
int problem_size_k[] = { 4096 };
int split_k_slices[] = { 1 };
double problem_alpha[] = { 1 };
double problem_beta[] = { 0.0 };
Testbed<Gemm> testbed;
using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute;
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
for (int split_k : split_k_slices) {
if (!Gemm::kSplitKSerial && split_k > 1) {
continue;
}
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
cutlass::gemm::GemmCoord problem_size(m, n, k);
for (int i = 0; i < iterations; i++){
passed = testbed.run(
problem_size,
split_k,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
}
if (!passed) {
return false;
}
}
}
}
}
}
}
return passed;
}
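// Illustrative only: a perf smoke test built on the loop above might look like the
// sketch below, where "ExampleGemm" is a hypothetical device-level GEMM type defined
// by the including test.
//
// TEST(Device_Gemm_example, perf) {
// EXPECT_TRUE(test::gemm::device::TestGemmPerf<ExampleGemm>(/*iterations=*/10));
// }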
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed.h/0 | {
"file_path": "test/unit/gemm/device/testbed.h",
"repo_id": "test",
"token_count": 8244
} | 52 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide TRMM interface
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/blas3.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/error_metrics.h"
#include "cutlass/util/reference/host/trmm.h"
#include "cutlass/util/reference/host/trmm_complex.h"
#include "cutlass/core_io.h"
#include "testbed_utils.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Trmm>
struct TestbedTrmmUniversal {
using ElementA = typename Trmm::ElementA;
using ElementB = typename Trmm::ElementB;
using ElementC = typename Trmm::ElementC;
using ElementAccumulator = typename Trmm::ElementAccumulator;
using ElementCompute = typename Trmm::TrmmKernel::Epilogue::OutputOp::ElementCompute;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_D;
uint64_t seed;
cutlass::HostTensor<typename Trmm::ElementA, typename Trmm::LayoutA> tensor_A;
cutlass::HostTensor<typename Trmm::ElementB, typename Trmm::LayoutB> tensor_B;
cutlass::HostTensor<typename Trmm::ElementC, typename Trmm::LayoutC> tensor_D;
cutlass::HostTensor<typename Trmm::ElementC, typename Trmm::LayoutC> reference_D;
//
// Methods
//
TestbedTrmmUniversal(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_D_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_D(init_D_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed,
int mantissa_in_bits) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Trmm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, mantissa_in_bits);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, mantissa_in_bits);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_symmetric_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed,
int mantissa_in_bits) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Trmm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillSymmetricRandomUniform(
view, seed, Trmm::kFillMode, scope_max, scope_min, mantissa_in_bits);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillSymmetricRandomGaussian(
view, seed, Trmm::kFillMode, 0, 0.5, mantissa_in_bits);
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Helper to initialize a tensor view (pads the wrong side of the diagonal with zeros, up to the alignment boundary)
template <typename Element, typename Layout>
bool initialize_pad_diagonal_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed,
int alignment) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Trmm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillPadDiagonalRandomUniform(
view, seed, Trmm::kFillMode, scope_max, scope_min, 0, alignment);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
EXPECT_TRUE(false) << "Gaussian distribution for pad diagonal not implemented";
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::gemm::GemmCoord problem_size) {
//
// Allocate the TRMM workspace
//
if (Trmm::kSideMode == cutlass::SideMode::kLeft) {
tensor_A.resize(cutlass::make_Coord(problem_size.m(),problem_size.m()));
}
else if (Trmm::kSideMode == cutlass::SideMode::kRight) {
tensor_A.resize(cutlass::make_Coord(problem_size.n(),problem_size.n()));
}
tensor_B.resize(problem_size.mn());
tensor_D.resize(problem_size.mn());
reference_D.resize(problem_size.mn(), false);
//EXPECT_TRUE(initialize_symmetric_tensor(tensor_A.host_view(), init_A, seed + 2017));
//EXPECT_TRUE(initialize_pad_diagonal_tensor(tensor_A.host_view(), init_A, seed + 2017, Trmm::kAlignmentA));
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2017, cutlass::MantissaInBits<typename Trmm::ElementA>::bits));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2019, cutlass::MantissaInBits<typename Trmm::ElementB>::bits));
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
tensor_A.host_view().at({0, 0}) = typename Trmm::ElementA(1);
tensor_B.host_view().at({0, 0}) = typename Trmm::ElementB(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_D.host_view());
tensor_A.sync_device();
tensor_B.sync_device();
tensor_D.sync_device();
}
/// Compares the device-computed result with the host reference using a relative error metric
bool compare_reference(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
if (tensor_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
if (reference_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
double l2_norm = cutlass::reference::host::TensorRelativeErrorMetric(reference_D.host_view(), tensor_D.host_view());
bool passed = l2_norm < cutlass::MantissaInBits<typename Trmm::ElementA>::error;
return passed;
}
/// Verifies the result against a host reference TRMM
bool verify(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha) {
//
// Verify
//
using HostReference = typename cutlass::platform::conditional<
(cutlass::platform::is_same<typename Trmm::ElementC,
cutlass::complex<double>
>::value ||
cutlass::platform::is_same<typename Trmm::ElementC,
cutlass::complex<float>
>::value
),
cutlass::reference::host::TrmmComplex<
typename Trmm::ElementA, typename Trmm::LayoutA,
Trmm::kTransformA,
Trmm::kSideMode, Trmm::kFillMode, Trmm::kDiagType,
typename Trmm::ElementB, typename Trmm::LayoutB,
Trmm::kTransformB,
typename Trmm::ElementC, typename Trmm::LayoutC,
ElementCompute,
ElementAccumulator>,
cutlass::reference::host::Trmm<
typename Trmm::ElementA, typename Trmm::LayoutA,
Trmm::kSideMode, Trmm::kFillMode, Trmm::kDiagType,
typename Trmm::ElementB, typename Trmm::LayoutB,
typename Trmm::ElementC, typename Trmm::LayoutC,
ElementCompute,
ElementAccumulator>
>::type;
HostReference reference_trmm;
reference_trmm(
problem_size,
alpha,
tensor_A.host_ref(),
tensor_B.host_ref(),
reference_D.host_ref(),
ElementAccumulator(0)
);
return compare_reference(problem_size, alpha);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Trmm::TrmmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmUniversalMode mode,
cutlass::gemm::GemmCoord problem_size,
int batch_count = 1,
ElementCompute alpha = ElementCompute(1)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
#if 0
std::cout << "[TestbedTrmmUniversal::run()] problem(m, n, k): " << problem_size
<< " alpha: " << ElementCompute(alpha) << std::endl;
#endif
this->initialize(problem_size);
//
// Initialize the TRMM operator
//
int batch_stride_A = 0;
if (Trmm::kSideMode == cutlass::SideMode::kLeft)
batch_stride_A = problem_size.m()*problem_size.m();
else if (Trmm::kSideMode == cutlass::SideMode::kRight)
batch_stride_A = problem_size.n()*problem_size.n();
typename Trmm::Arguments arguments{
mode,
problem_size,
batch_count,
{alpha},
tensor_A.device_data(),
tensor_B.device_data(),
tensor_D.device_data(),
batch_stride_A,
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
tensor_A.layout().stride(0),
tensor_B.layout().stride(0),
tensor_D.layout().stride(0)
};
Trmm trmm_op;
size_t workspace_size = Trmm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = trmm_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Run the TRMM
//
status = trmm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, alpha);
if (!passed) {
std::stringstream fname;
fname << "error_Trmm_device_"
<< "fill_mode_"
<< (Trmm::kFillMode == cutlass::FillMode::kLower ? "lower_" :
(Trmm::kFillMode == cutlass::FillMode::kUpper ? "upper_" : "invalid_"))
<< "side_mode_"
<< (Trmm::kSideMode == cutlass::SideMode::kLeft ? "left_" :
(Trmm::kSideMode == cutlass::SideMode::kRight ? "right_" : "invalid_"))
<< "mnk_"
<< problem_size.m() << "x"
<< problem_size.n() << "x"
<< problem_size.k() << "_"
<< Trmm::ThreadblockShape::kM << "x"
<< Trmm::ThreadblockShape::kN << "x"
<< Trmm::ThreadblockShape::kK << "_"
<< Trmm::WarpShape::kM << "x"
<< Trmm::WarpShape::kN << "x"
<< Trmm::WarpShape::kK << ".txt";
std::cout << fname.str() << std::endl;
std::ofstream results(fname.str());
results << problem_size << std::endl;
results
<< "\nA:\n" << tensor_A.host_view() << "\n"
<< "\nB:\n" << tensor_B.host_view() << "\n"
<< "\nD reference:\n" << reference_D.host_view() << "\n"
<< "\nD computed:\n" << tensor_D.host_view() << "\n";
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Trmm>
bool TestTrmmUniversal(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmUniversalMode mode,
int batch_count,
double alpha = 1.0) {
bool passed = true;
TestbedTrmmUniversal<Trmm> testbed;
using ElementCompute = typename Trmm::EpilogueOutputOp::ElementCompute;
passed = testbed.run(
mode,
problem_size,
batch_count,
cutlass::from_real<ElementCompute>(alpha)
);
return passed;
}
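// Illustrative usage sketch (hypothetical "ExampleTrmm" type): running a single
// left-side TRMM problem through the testbed above. Note that k matches m here,
// as expected when kSideMode == cutlass::SideMode::kLeft.
//
// cutlass::gemm::GemmCoord problem(/*m=*/256, /*n=*/128, /*k=*/256);
// bool ok = test::gemm::device::TestTrmmUniversal<ExampleTrmm>(
// problem, cutlass::gemm::GemmUniversalMode::kGemm, /*batch_count=*/1, /*alpha=*/2.0);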
template <typename Trmm>
bool TestAllTrmmUniversal() {
bool passed = true;
int const kMinimumOperandElementSize = int(cutlass::sizeof_bits<typename Trmm::ElementA>::value);
int const kAlignment = cutlass::platform::is_same<
typename Trmm::OperatorClass,
cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize;
// int8_t gemm alignment constraints
int const kAlignmentM = cutlass::platform::is_same<typename Trmm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Trmm::ElementA, int8_t>::value &&
cutlass::platform::is_same<typename Trmm::LayoutA, cutlass::layout::ColumnMajor>::value ? 4 : kAlignment;
int const kAlignmentN = kAlignmentM;
int const kAlignmentK = cutlass::platform::is_same<typename Trmm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Trmm::ElementA, int8_t>::value &&
cutlass::platform::is_same<typename Trmm::LayoutA, cutlass::layout::RowMajor>::value
? 4 : kAlignment;
cutlass::gemm::GemmUniversalMode modes[] = {
cutlass::gemm::GemmUniversalMode::kGemm,
};
int problem_size_m[] = {
kAlignmentK,
Trmm::ThreadblockShape::kK * Trmm::kStages - kAlignmentK,
Trmm::ThreadblockShape::kK * Trmm::kStages * 3 - kAlignmentK
};
int problem_size_n[] = {
kAlignmentN, 512 - 2*kAlignmentN
};
int batch_counts[] = { // may be interpreted as batch count or split-K slices
1 // Just running one batch for now (removing 2, 3, 5, 7)
};
double problem_alpha[] = {
1.0, 2.0
};
using ElementCompute = typename Trmm::EpilogueOutputOp::ElementCompute;
for (cutlass::gemm::GemmUniversalMode mode : modes) {
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int batch_count : batch_counts) {
for (auto alpha : problem_alpha) {
int k = 0;
if (Trmm::kSideMode == cutlass::SideMode::kLeft)
k = m;
else if (Trmm::kSideMode == cutlass::SideMode::kRight)
k = n;
if (mode == cutlass::gemm::GemmUniversalMode::kGemm ||
mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) {
#if 0
// skip very small K problems
if (k / batch_count < 2 * Trmm::ThreadblockShape::kK) {
continue;
}
#endif
}
cutlass::gemm::GemmCoord problem_size(m, n, k);
TestbedTrmmUniversal<Trmm> testbed;
passed = testbed.run(
mode,
problem_size,
batch_count,
cutlass::from_real<ElementCompute>(alpha)
);
if (!passed) {
return false;
}
}
}
}
}
}
return passed;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_trmm_universal.h/0 | {
"file_path": "test/unit/gemm/device/testbed_trmm_universal.h",
"repo_id": "test",
"token_count": 8616
} | 53 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests cutlass::transform::threadblock::PredicatedTileIterator
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_fill.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace transform {
namespace threadblock {
namespace kernel {
/// Copy with an iterator
template <typename Iterator>
__global__ void copy(
typename Iterator::Params dst_params,
typename Iterator::Element *dst_pointer,
typename Iterator::Params src_params,
typename Iterator::Element *src_pointer,
cutlass::Coord<2> extent) {
Iterator dst_iterator(dst_params, dst_pointer, extent, threadIdx.x);
Iterator src_iterator(src_params, src_pointer, extent, threadIdx.x);
// Number of strided tile steps required to cover the copy extent
int iterations = (extent[1] + Iterator::Shape::kStrided - 1) / Iterator::Shape::kStrided;
typename Iterator::Fragment frag;
for(size_t i = 0; i < frag.size(); i++)
frag[i] = 0;
// Copy the first tile, then advance both iterators
src_iterator.load(frag);
dst_iterator.store(frag);
++dst_iterator;
++src_iterator;
// Copy the remaining tiles
for (; iterations > 1; --iterations) {
src_iterator.load(frag);
dst_iterator.store(frag);
++dst_iterator;
++src_iterator;
}
}
} // namespace kernel
} // namespace threadblock
} // namespace transform
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined) {
using Shape = cutlass::layout::PitchLinearShape<64, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<Shape, kThreads>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator<
Shape, Element, Layout, 1, ThreadMap
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(57, 35);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(64, 35);
cutlass::HostTensor<int, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity());
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]; ++s) {
for (int c = 0; c < alloc_extent[0]; ++c) {
Element expected = Element(0);
if (c < copy_extent[0] && s < copy_extent[1]) {
expected = src_tensor.at({c, s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({c, s});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_128x4) {
using Shape = cutlass::layout::PitchLinearShape<128, 4>;
using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int8_t;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap<Shape, kThreads, ThreadTileShape>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
Shape, Element, Layout, 1, ThreadMap, false
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(128, 4);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(128, 4);
cutlass::HostTensor<int8_t, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int8_t, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity());
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]; ++s) {
for (int c = 0; c < alloc_extent[0]; ++c) {
Element expected = Element(0);
if (c < copy_extent[0] && s < copy_extent[1]) {
expected = src_tensor.at({c, s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({c, s});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_128x64) {
using Shape = cutlass::layout::PitchLinearShape<128, 64>;
using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int8_t;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap<Shape, kThreads, ThreadTileShape>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
Shape, Element, Layout, 1, ThreadMap
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(128, 64);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(128, 64);
cutlass::HostTensor<int8_t, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int8_t, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity());
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]; ++s) {
for (int c = 0; c < alloc_extent[0]; ++c) {
Element expected = Element(0);
if (c < copy_extent[0] && s < copy_extent[1]) {
expected = src_tensor.at({c, s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({c, s});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_64x64) {
using Shape = cutlass::layout::PitchLinearShape<64, 64>;
using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int8_t;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap<Shape, kThreads, ThreadTileShape>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
Shape, Element, Layout, 1, ThreadMap
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(64, 64);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(64, 64);
cutlass::HostTensor<int8_t, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int8_t, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity());
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]; ++s) {
for (int c = 0; c < alloc_extent[0]; ++c) {
Element expected = Element(0);
if (c < copy_extent[0] && s < copy_extent[1]) {
expected = src_tensor.at({c, s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({c, s});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_64x8) {
using Shape = cutlass::layout::PitchLinearShape<64, 8>;
using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int8_t;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap<Shape, kThreads, ThreadTileShape>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
Shape, Element, Layout, 1, ThreadMap
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(32, 8);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(64, 8);
cutlass::HostTensor<int8_t, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int8_t, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity());
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]; ++s) {
for (int c = 0; c < alloc_extent[0]; ++c) {
Element expected = Element(0);
if (c < copy_extent[0] && s < copy_extent[1]) {
expected = src_tensor.at({c, s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({c, s});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_64x32_transpose4x4) {
using Shape = cutlass::layout::PitchLinearShape<64, 8>;
using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int8_t;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap<Shape, kThreads, ThreadTileShape>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
Shape, Element, Layout, 1, ThreadMap, true
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(64, 32);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(64, 32);
cutlass::HostTensor<int8_t, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int8_t, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
uint64_t seed = 7;
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::TensorFillRandomUniform(src_tensor.host_view(), seed, 8, -8, 0);
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]/4; ++s) {
for (int c = 0; c < alloc_extent[0]/4; ++c) {
for (int s1 = 0; s1 < 4; s1++){
for(int c1 = 0; c1 < 4; c1++){
Element expected = Element(0);
int l_c = c * 4 + c1;
int l_s = s * 4 + s1;
int l_tc = c * 4 + s1;
int l_ts = s * 4 + c1;
if (l_c < copy_extent[0] && l_s < copy_extent[1]) {
expected = src_tensor.at({l_c, l_s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({l_tc, l_ts});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_64x29_transpose4x4) {
using Shape = cutlass::layout::PitchLinearShape<64, 8>;
using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int8_t;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap<Shape, kThreads, ThreadTileShape>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
Shape, Element, Layout, 1, ThreadMap, true
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(64, 29);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(64, 29);
cutlass::HostTensor<int8_t, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int8_t, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
uint64_t seed = 7;
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::TensorFillRandomUniform(src_tensor.host_view(), seed, 8, -8, 0);
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]/4; ++s) {
for (int c = 0; c < alloc_extent[0]/4; ++c) {
for (int s1 = 0; s1 < 4; s1++){
for(int c1 = 0; c1 < 4; c1++){
Element expected = Element(0);
int l_c = c * 4 + c1;
int l_s = s * 4 + s1;
int l_tc = c * 4 + s1;
int l_ts = s * 4 + c1;
if (l_c < copy_extent[0] && l_s < copy_extent[1]) {
expected = src_tensor.at({l_c, l_s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({l_tc, l_ts});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_120x4_transpose4x4) {
using Shape = cutlass::layout::PitchLinearShape<128, 4>;
using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int8_t;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap<Shape, kThreads, ThreadTileShape>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
Shape, Element, Layout, 1, ThreadMap, true
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(120, 4);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(120, 4);
cutlass::HostTensor<int8_t, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int8_t, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
uint64_t seed = 7;
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::TensorFillRandomUniform(src_tensor.host_view(), seed, 8, -8, 0);
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]/4; ++s) {
for (int c = 0; c < alloc_extent[0]/4; ++c) {
for (int s1 = 0; s1 < 4; s1++){
for(int c1 = 0; c1 < 4; c1++){
Element expected = Element(0);
int l_c = c * 4 + c1;
int l_s = s * 4 + s1;
int l_tc = c * 4 + s1;
int l_ts = s * 4 + c1;
if (l_c < copy_extent[0] && l_s < copy_extent[1]) {
expected = src_tensor.at({l_c, l_s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({l_tc, l_ts});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_48x29_transpose4x4) {
using Shape = cutlass::layout::PitchLinearShape<64, 8>;
using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int8_t;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap<Shape, kThreads, ThreadTileShape>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
Shape, Element, Layout, 1, ThreadMap, true
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(48, 29);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(48, 29);
cutlass::HostTensor<int8_t, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int8_t, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
uint64_t seed = 7;
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::TensorFillRandomUniform(src_tensor.host_view(), seed, 8, -8, 0);
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]/4; ++s) {
for (int c = 0; c < alloc_extent[0]/4; ++c) {
for (int s1 = 0; s1 < 4; s1++){
for(int c1 = 0; c1 < 4; c1++){
Element expected = Element(0);
int l_c = c * 4 + c1;
int l_s = s * 4 + s1;
int l_tc = c * 4 + s1;
int l_ts = s * 4 + c1;
if (l_c < copy_extent[0] && l_s < copy_extent[1]) {
expected = src_tensor.at({l_c, l_s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({l_tc, l_ts});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/transform/threadblock/predicated_tile_iterator.cu/0 | {
"file_path": "test/unit/transform/threadblock/predicated_tile_iterator.cu",
"repo_id": "test",
"token_count": 9998
} | 54 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Utilities accompanying the CUTLASS library for interacting with Library types.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Lexical cast from string
template <typename T> T from_string(std::string const &);
/// Converts a Provider enumerant to a string
char const *to_string(Provider provider, bool pretty = false);
/// Parses a Provider enumerant from a string
template <> Provider from_string<Provider>(std::string const &str);
/// Converts a GemmKind enumerant to a string
char const *to_string(GemmKind type, bool pretty = false);
/// Converts a RankKKind enumerant to a string
char const *to_string(RankKKind type, bool pretty = false);
/// Converts a TrmmKind enumerant to a string
char const *to_string(TrmmKind type, bool pretty = false);
/// Converts a SymmKind enumerant to a string
char const *to_string(SymmKind type, bool pretty = false);
/// Converts a SideMode enumerant to a string
char const *to_string(SideMode type, bool pretty = false);
/// Converts a FillMode enumerant to a string
char const *to_string(FillMode type, bool pretty = false);
/// Converts a BlasMode enumerant to a string
char const *to_string(BlasMode type, bool pretty = false);
/// Converts a DiagType enumerant to a string
char const *to_string(DiagType type, bool pretty = false);
/// Converts an OperationKind enumerant to a string
char const *to_string(OperationKind type, bool pretty = false);
/// Parses an OperationKind enumerant from a string
template <> OperationKind from_string<OperationKind>(std::string const &str);
/// Converts a NumericType enumerant to a string
char const *to_string(NumericTypeID type, bool pretty = false);
/// Parses a NumericType enumerant from a string
template <> NumericTypeID from_string<NumericTypeID>(std::string const &str);
/// Returns the size of a data type in bits
int sizeof_bits(NumericTypeID type);
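// For example, sizeof_bits(NumericTypeID::kF16) is expected to return 16 and
// sizeof_bits(NumericTypeID::kS8) to return 8.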
/// Returns true if the numeric type is a complex data type or false if real-valued.
bool is_complex_type(NumericTypeID type);
/// Returns the real-valued type underlying a type (only different from 'type' if complex)
NumericTypeID get_real_type(NumericTypeID type);
/// Returns true if numeric type is integer
bool is_integer_type(NumericTypeID type);
/// Returns true if numeric type is signed
bool is_signed_type(NumericTypeID type);
/// Returns true if numeric type is a signed integer
bool is_signed_integer(NumericTypeID type);
/// Returns true if numeric type is an unsigned integer
bool is_unsigned_integer(NumericTypeID type);
/// Returns true if numeric type is floating-point type
bool is_float_type(NumericTypeID type);
/// To string method for cutlass::Status
char const *to_string(Status status, bool pretty = false);
/// Converts a LayoutTypeID enumerant to a string
char const *to_string(LayoutTypeID layout, bool pretty = false);
/// Parses a LayoutType enumerant from a string
template <> LayoutTypeID from_string<LayoutTypeID>(std::string const &str);
/// Returns the rank of a layout's stride based on the LayoutTypeID
int get_layout_stride_rank(LayoutTypeID layout_id);
/// Converts an OpcodeClassID enumerant to a string
char const *to_string(OpcodeClassID type, bool pretty = false);
/// Converts an OpcodeClassID enumerant from a string
template <>
OpcodeClassID from_string<OpcodeClassID>(std::string const &str);
/// Converts a ComplexTransform enumerant to a string
char const *to_string(ComplexTransform type, bool pretty = false);
/// Converts a ComplexTransform enumerant from a string
template <>
ComplexTransform from_string<ComplexTransform>(std::string const &str);
/// Converts a SplitKMode enumerant to a string
char const *to_string(SplitKMode split_k_mode, bool pretty = false);
/// Converts a SplitKMode enumerant from a string
template <>
SplitKMode from_string<SplitKMode>(std::string const &str);
/// Converts a ConvModeID enumerant to a string
char const *to_string(ConvModeID type, bool pretty = false);
/// Converts a ConvModeID enumerant from a string
template <>
ConvModeID from_string<ConvModeID>(std::string const &str);
/// Converts an IteratorAlgorithmID enumerant to a string
char const *to_string(IteratorAlgorithmID type, bool pretty = false);
/// Converts an IteratorAlgorithmID enumerant from a string
template <>
IteratorAlgorithmID from_string<IteratorAlgorithmID>(std::string const &str);
/// Converts a ConvKind enumerant to a string
char const *to_string(ConvKind type, bool pretty = false);
/// Converts a ConvKind enumerant from a string
template <>
ConvKind from_string<ConvKind>(std::string const &str);
/// Converts a RasterOrder enumerant to a string
char const *to_string(RasterOrder type, bool pretty = false);
/// Converts a RasterOrder enumerant from a string
template<>
RasterOrder from_string<RasterOrder>(std::string const &str);
/// Lexical cast from int64_t to string
std::string lexical_cast(int64_t int_value);
/// Lexical cast a string to a byte array. Returns true if cast is successful or false if invalid.
bool lexical_cast(std::vector<uint8_t> &bytes, NumericTypeID type, std::string const &str);
/// Lexical cast TO a string FROM a byte array. Returns true if cast is successful or false if invalid.
std::string lexical_cast(std::vector<uint8_t> &bytes, NumericTypeID type);
/// Casts from a signed int64 to the destination type. Returns true if successful.
bool cast_from_int64(std::vector<uint8_t> &bytes, NumericTypeID type, int64_t src);
/// Casts from an unsigned int64 to the destination type. Returns true if successful.
bool cast_from_uint64(std::vector<uint8_t> &bytes, NumericTypeID type, uint64_t src);
/// Casts from a real value represented as a double to the destination type. Returns true if successful.
bool cast_from_double(std::vector<uint8_t> &bytes, NumericTypeID type, double src);
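// Example (illustrative sketch; variable names are hypothetical): converting a
// user-supplied scalar into a typed byte array, suitable for passing as an
// epilogue argument, might look like the following.
//
//   std::vector<uint8_t> alpha_bytes;
//   bool ok = cast_from_double(alpha_bytes, NumericTypeID::kF32, 1.0);
//   // On success, alpha_bytes holds the 4-byte representation of 1.0f.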
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/include/cutlass/library/util.h/0 | {
"file_path": "tools/library/include/cutlass/library/util.h",
"repo_id": "tools",
"token_count": 2185
} | 55 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
  \brief Instantiates conv3d reference operations and registers them in the CUTLASS Library manifest.
*/
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "conv_reference_operation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_conv3d_reference_operations(Manifest &manifest) {
make_conv_all<
3,
cutlass::half_t, cutlass::layout::TensorNDHWC,
cutlass::half_t, cutlass::layout::TensorNDHWC,
cutlass::half_t, cutlass::layout::TensorNDHWC,
cutlass::half_t,
cutlass::half_t
>(manifest);
make_conv_all<
3,
cutlass::half_t, cutlass::layout::TensorNDHWC,
cutlass::half_t, cutlass::layout::TensorNDHWC,
cutlass::half_t, cutlass::layout::TensorNDHWC,
float,
float
>(manifest);
make_conv_all<
3,
cutlass::half_t, cutlass::layout::TensorNDHWC,
cutlass::half_t, cutlass::layout::TensorNDHWC,
float, cutlass::layout::TensorNDHWC,
float,
float
>(manifest);
make_conv_all<
3,
cutlass::bfloat16_t, cutlass::layout::TensorNDHWC,
cutlass::bfloat16_t, cutlass::layout::TensorNDHWC,
cutlass::bfloat16_t, cutlass::layout::TensorNDHWC,
float,
float
>(manifest);
make_conv_all<
3,
cutlass::bfloat16_t, cutlass::layout::TensorNDHWC,
cutlass::bfloat16_t, cutlass::layout::TensorNDHWC,
float, cutlass::layout::TensorNDHWC,
float,
float
>(manifest);
make_conv_all<
3,
cutlass::tfloat32_t, cutlass::layout::TensorNDHWC,
cutlass::tfloat32_t, cutlass::layout::TensorNDHWC,
cutlass::tfloat32_t, cutlass::layout::TensorNDHWC,
float,
float
>(manifest);
make_conv_all<
3,
cutlass::tfloat32_t, cutlass::layout::TensorNDHWC,
cutlass::tfloat32_t, cutlass::layout::TensorNDHWC,
float, cutlass::layout::TensorNDHWC,
float,
float
>(manifest);
make_conv_all<
3,
float, cutlass::layout::TensorNDHWC,
float, cutlass::layout::TensorNDHWC,
float, cutlass::layout::TensorNDHWC,
float,
float
>(manifest);
make_conv_fprop<
3,
int8_t, cutlass::layout::TensorNDHWC,
int8_t, cutlass::layout::TensorNDHWC,
int32_t, cutlass::layout::TensorNDHWC,
int32_t,
int32_t,
NumericConverterClamp<int32_t, int32_t>
>(manifest);
make_conv_fprop<
3,
int8_t, cutlass::layout::TensorNDHWC,
int8_t, cutlass::layout::TensorNDHWC,
int8_t, cutlass::layout::TensorNDHWC,
float,
int32_t,
NumericConverterClamp<int8_t, float>
>(manifest);
make_conv_fprop<
3,
uint8_t, cutlass::layout::TensorNDHWC,
uint8_t, cutlass::layout::TensorNDHWC,
int32_t, cutlass::layout::TensorNDHWC,
int32_t,
int32_t,
NumericConverterClamp<int32_t, int32_t>
>(manifest);
make_conv_fprop<
3,
uint8_t, cutlass::layout::TensorNDHWC,
uint8_t, cutlass::layout::TensorNDHWC,
int8_t, cutlass::layout::TensorNDHWC,
float,
int32_t,
NumericConverterClamp<int8_t, float>
>(manifest);
make_conv_fprop<
3,
cutlass::int4b_t, cutlass::layout::TensorNDHWC,
cutlass::int4b_t, cutlass::layout::TensorNDHWC,
int32_t, cutlass::layout::TensorNDHWC,
int32_t,
int32_t,
NumericConverterClamp<int32_t, int32_t>
>(manifest);
make_conv_fprop<
3,
cutlass::int4b_t, cutlass::layout::TensorNDHWC,
cutlass::int4b_t, cutlass::layout::TensorNDHWC,
cutlass::int4b_t, cutlass::layout::TensorNDHWC,
float,
int32_t,
NumericConverterClamp<cutlass::int4b_t, float>
>(manifest);
make_conv_fprop<
3,
cutlass::uint4b_t, cutlass::layout::TensorNDHWC,
cutlass::uint4b_t, cutlass::layout::TensorNDHWC,
int32_t, cutlass::layout::TensorNDHWC,
int32_t,
int32_t,
NumericConverterClamp<int32_t, int32_t>
>(manifest);
make_conv_fprop<
3,
cutlass::uint4b_t, cutlass::layout::TensorNDHWC,
cutlass::uint4b_t, cutlass::layout::TensorNDHWC,
cutlass::uint4b_t, cutlass::layout::TensorNDHWC,
float,
int32_t,
NumericConverterClamp<cutlass::uint4b_t, float>
>(manifest);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/src/reference/conv3d.cu/0 | {
"file_path": "tools/library/src/reference/conv3d.cu",
"repo_id": "tools",
"token_count": 2382
} | 56 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines reference operations for GEMM operation kinds in CUTLASS Library
*/
#pragma once
#include <iostream>
#include <sstream>
#include <cstring>
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "cutlass/library/util.h"
#include "library_internal.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/device/gemm_complex.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
template <
Provider Provider_,
typename ElementA_,
typename LayoutA_,
cutlass::ComplexTransform TransformA,
typename ElementB_,
typename LayoutB_,
cutlass::ComplexTransform TransformB,
typename ElementC_,
typename LayoutC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ElementD_ = ElementC_,
typename ConvertOp_ = NumericConverter<ElementD_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
class GemmReferenceOperation : public Operation {
public:
static Provider const kProvider = Provider_;
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA, LayoutA>;
static cutlass::ComplexTransform const kTransformA = TransformA;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB, LayoutB>;
static cutlass::ComplexTransform const kTransformB = TransformB;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using ElementD = ElementD_;
using TensorRefC = TensorRef<ElementC, LayoutC>;
using TensorRefD = TensorRef<ElementD, LayoutC>;
using ElementCompute = ElementCompute_;
using ElementAccumulator = ElementAccumulator_;
using ConvertOp = ConvertOp_;
using InnerProductOp = InnerProductOp_;
protected:
/// Storage for the name string
std::string name_;
///
GemmDescription description_;
public:
/// Constructor
GemmReferenceOperation() {
// Basic information
description_.provider = kProvider;
description_.kind = OperationKind::kGemm;
description_.gemm_kind = GemmKind::kUniversal;
// Tensor description
description_.A = make_TensorDescription<ElementA, LayoutA>();
description_.transform_A = ComplexTransformMap<kTransformA>::kId;
description_.B = make_TensorDescription<ElementB, LayoutB>();
description_.transform_B = ComplexTransformMap<kTransformB>::kId;
description_.C = make_TensorDescription<ElementC, LayoutC>();
description_.D = make_TensorDescription<ElementD, LayoutC>();
// Epilogue compute and accumulator type description
description_.element_epilogue = NumericTypeMap<ElementCompute>::kId;
description_.tile_description.math_instruction.element_accumulator =
NumericTypeMap<ElementAccumulator>::kId;
// Compute capability for gemm reference
description_.tile_description.minimum_compute_capability =
(kProvider == Provider::kReferenceDevice ? 50 : 0);
description_.tile_description.maximum_compute_capability = 1024;
// Procedural name
std::stringstream ss;
ss << "gemm"
<< "_reference_" << to_string(description_.provider)
<< "_" << to_string(description_.A.element) << to_string(description_.A.layout)
<< "_" << to_string(description_.B.element) << to_string(description_.B.layout)
<< "_" << to_string(description_.C.element) << to_string(description_.C.layout)
<< "_" << to_string(description_.tile_description.math_instruction.element_accumulator);
name_ = ss.str();
description_.name = name_.c_str();
}
/// Returns the description of the GEMM operation
virtual OperationDescription const & description() const {
return description_;
}
virtual Status can_implement(
void const *configuration,
void const *arguments) const {
return Status::kSuccess;
}
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(GemmUniversalConfiguration);
}
virtual uint64_t get_device_workspace_size(
void const *configuration,
void const *arguments = nullptr) const {
return 0;
}
virtual Status initialize(
void const *configuration,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
std::memcpy(host_workspace, configuration, get_host_workspace_size(configuration));
return Status::kSuccess;
}
virtual Status run(
void const *arguments,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
GemmUniversalConfiguration const &config = *static_cast<GemmUniversalConfiguration const *>(host_workspace);
GemmUniversalArguments const &args = *static_cast<GemmUniversalArguments const *>(arguments);
TensorRefA ref_A{static_cast<ElementA *>(const_cast<void *>(args.A)), LayoutA(int(config.lda))};
TensorRefB ref_B{static_cast<ElementB *>(const_cast<void *>(args.B)), LayoutB(int(config.ldb))};
TensorRefC ref_C{static_cast<ElementC *>(const_cast<void *>(args.C)), LayoutC(int(config.ldc))};
TensorRefD ref_D{static_cast<ElementD *>(args.D), LayoutC(int(config.ldd))};
if (kProvider == Provider::kReferenceHost) {
cutlass::reference::host::GemmComplex<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ElementD,
ConvertOp,
InnerProductOp
>(
config.problem_size,
*static_cast<ElementCompute const *>(args.alpha),
ref_A,
kTransformA,
ref_B,
kTransformB,
*static_cast<ElementCompute const *>(args.beta),
ref_C,
ref_D,
ElementAccumulator(),
((config.mode == library::GemmUniversalMode::kBatched) ? config.batch_count : 1),
args.batch_stride_A,
args.batch_stride_B,
args.batch_stride_C,
args.batch_stride_D
);
return Status::kSuccess;
}
else if (kProvider == Provider::kReferenceDevice) {
cutlass::reference::device::GemmComplex<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ElementD,
ConvertOp,
InnerProductOp
>(
config.problem_size,
*static_cast<ElementCompute const *>(args.alpha),
ref_A,
kTransformA,
ref_B,
kTransformB,
*static_cast<ElementCompute const *>(args.beta),
ref_C,
ref_D,
ElementAccumulator(),
((config.mode == library::GemmUniversalMode::kBatched) ? config.batch_count : 1),
args.batch_stride_A,
args.batch_stride_B,
args.batch_stride_C,
args.batch_stride_D
);
return Status::kSuccess;
}
return Status::kErrorNotSupported;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA_,
typename LayoutA_,
cutlass::ComplexTransform TransformA,
typename ElementB_,
typename LayoutB_,
cutlass::ComplexTransform TransformB,
typename ElementC_,
typename LayoutC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ElementD_ = ElementC_,
typename ConvertOp_ = NumericConverter<ElementD_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
void make_gemm(Manifest &manifest) {
#if !defined(CUTLASS_PROFILER_DISABLE_REFERENCE)
manifest.append(new GemmReferenceOperation<
Provider::kReferenceHost,
ElementA_, LayoutA_, TransformA,
ElementB_, LayoutB_, TransformB,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>);
manifest.append(new GemmReferenceOperation<
Provider::kReferenceDevice,
ElementA_, LayoutA_, TransformA,
ElementB_, LayoutB_, TransformB,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>);
#endif
}
/// Helper to create NN, NT, TN, and TT GEMM layouts.
template <
typename ElementA_, cutlass::ComplexTransform TransformA,
typename ElementB_, cutlass::ComplexTransform TransformB,
typename ElementC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ElementD_ = ElementC_,
typename ConvertOp_ = NumericConverter<ElementD_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
void make_gemm_canonical_layouts(Manifest &manifest) {
// M Major outputs
make_gemm<
ElementA_, cutlass::layout::ColumnMajor, TransformA,
ElementB_, cutlass::layout::ColumnMajor, TransformB,
ElementC_, cutlass::layout::ColumnMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm<
ElementA_, cutlass::layout::ColumnMajor, TransformA,
ElementB_, cutlass::layout::RowMajor, TransformB,
ElementC_, cutlass::layout::ColumnMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm<
ElementA_, cutlass::layout::RowMajor, TransformA,
ElementB_, cutlass::layout::ColumnMajor, TransformB,
ElementC_, cutlass::layout::ColumnMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm<
ElementA_, cutlass::layout::RowMajor, TransformA,
ElementB_, cutlass::layout::RowMajor, TransformB,
ElementC_, cutlass::layout::ColumnMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
// N Major outputs
make_gemm<
ElementA_, cutlass::layout::ColumnMajor, TransformA,
ElementB_, cutlass::layout::ColumnMajor, TransformB,
ElementC_, cutlass::layout::RowMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm<
ElementA_, cutlass::layout::ColumnMajor, TransformA,
ElementB_, cutlass::layout::RowMajor, TransformB,
ElementC_, cutlass::layout::RowMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm<
ElementA_, cutlass::layout::RowMajor, TransformA,
ElementB_, cutlass::layout::ColumnMajor, TransformB,
ElementC_, cutlass::layout::RowMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm<
ElementA_, cutlass::layout::RowMajor, TransformA,
ElementB_, cutlass::layout::RowMajor, TransformB,
ElementC_, cutlass::layout::RowMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
}
/// Helper to create TN and interleaved GEMM layouts.
template <
int InterleaveK,
typename ElementA_,
typename ElementB_,
typename ElementC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ElementD_ = ElementC_,
typename ConvertOp_ = NumericConverter<ElementC_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
void make_gemm_interleaved_layouts(Manifest &manifest) {
make_gemm<
ElementA_, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone,
ElementB_, cutlass::layout::ColumnMajor, cutlass::ComplexTransform::kNone,
ElementC_, cutlass::layout::ColumnMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
}
/// Helper to create real-valued GEMMs with canonical layouts
template <
typename ElementA_,
typename ElementB_,
typename ElementC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ElementD_ = ElementC_,
typename ConvertOp_ = NumericConverter<ElementD_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
void make_gemm_real_canonical_layouts(Manifest &manifest) {
make_gemm_canonical_layouts<
ElementA_, cutlass::ComplexTransform::kNone,
ElementB_, cutlass::ComplexTransform::kNone,
ElementC_,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
}
/// Helper to create all complex transformation permutations
template <
typename ElementA_,
typename ElementB_,
typename ElementC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ElementD_ = ElementC_,
typename ConvertOp_ = NumericConverter<ElementD_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
void make_gemm_complex_canonical_layouts(Manifest &manifest) {
make_gemm_canonical_layouts<
ElementA_, cutlass::ComplexTransform::kNone,
ElementB_, cutlass::ComplexTransform::kNone,
ElementC_,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm_canonical_layouts<
ElementA_, cutlass::ComplexTransform::kConjugate,
ElementB_, cutlass::ComplexTransform::kConjugate,
ElementC_,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm_canonical_layouts<
ElementA_, cutlass::ComplexTransform::kNone,
ElementB_, cutlass::ComplexTransform::kConjugate,
ElementC_,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm_canonical_layouts<
ElementA_, cutlass::ComplexTransform::kConjugate,
ElementB_, cutlass::ComplexTransform::kNone,
ElementC_,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
}
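// Example (illustrative sketch; the initializer function name is hypothetical):
// a manifest initializer might instantiate reference operations for real- and
// complex-valued GEMMs using the helpers above as follows.
//
//   void initialize_gemm_reference_operations_example(Manifest &manifest) {
//     make_gemm_real_canonical_layouts<float, float, float, float>(manifest);
//     make_gemm_complex_canonical_layouts<
//       complex<float>, complex<float>, complex<float>, complex<float>
//     >(manifest);
//   }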
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/src/reference/gemm_reference_operation.h/0 | {
"file_path": "tools/library/src/reference/gemm_reference_operation.h",
"repo_id": "tools",
"token_count": 5815
} | 57 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Gemm Profiler
*/
#pragma once
#include <vector>
#include <string>
#include <memory>
#include <algorithm>
#include <unordered_map>
// CUTLASS Library includes
#include "cutlass/library/library.h"
#include "cutlass/library/util.h"
#include "cutlass/library/manifest.h"
// Profiler includes
#include "options.h"
#include "device_context.h"
#include "operation_profiler.h"
#include "performance_result.h"
#include "problem_space.h"
#include "reduction_operation_profiler.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Operation profiler for GEMM problems
class GemmOperationProfiler : public OperationProfiler {
public:
/// Problem structure obtained from problem space
struct GemmProblem {
cutlass::library::GemmUniversalMode mode{library::GemmUniversalMode::kGemm};
int64_t m{16};
int64_t n{16};
int64_t k{16};
int64_t lda{0};
int64_t ldb{0};
int64_t ldc{0};
std::vector<uint8_t> alpha;
std::vector<uint8_t> beta;
cutlass::library::SplitKMode split_k_mode{library::SplitKMode::kNone};
int split_k_slices{1};
int batch_count{1};
cutlass::library::RasterOrder raster_order{cutlass::library::RasterOrder::kHeuristic};
int swizzle_size{1};
// gemm with parallel interleaved reduction
// gemm epilogue (alpha, beta) = (1.0, 0.0)
// reduction epilogue (alpha, beta) = (GemmProblem::alpha, GemmProblem::beta)
std::vector<uint8_t> alpha_one;
std::vector<uint8_t> beta_zero;
//
// Methods
//
/// Parses the problem
Status parse(
library::GemmDescription const &operation_desc,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Total number of bytes loaded
int64_t bytes(library::GemmDescription const &operation_desc) const;
/// Total number of flops computed
int64_t flops(library::GemmDescription const &operation_desc) const;
/// Initializes a performance result
void initialize_result(
PerformanceResult &result,
library::GemmDescription const &operation_desc,
ProblemSpace const &problem_space);
};
/// Workspace used
struct GemmWorkspace {
DeviceAllocation *A{nullptr};
DeviceAllocation *B{nullptr};
DeviceAllocation *C{nullptr};
DeviceAllocation *Computed{nullptr};
DeviceAllocation *Reference{nullptr};
/// Number of copies of the problem workspace which are visited sequentially during
/// profiling to avoid camping in the last level cache.
int problem_count{1};
library::GemmUniversalConfiguration configuration;
library::GemmUniversalArguments arguments;
/// Buffer used for the operation's host workspace
std::vector<uint8_t> host_workspace;
/// Buffer used for the operations' device workspace
DeviceAllocation device_workspace;
/// Library configuration and arguments for reduction operator
library::ReductionConfiguration reduction_configuration;
library::ReductionArguments reduction_arguments;
/// Buffer used for the cutlass reduction operations' host workspace
std::vector<uint8_t> reduction_host_workspace;
};
protected:
//
// Data members
//
/// GEMM problem obtained from problem space
GemmProblem problem_;
/// Device memory allocations
GemmWorkspace gemm_workspace_;
/// CUTLASS parallel reduction operation to follow this GEMM operation
library::Operation const *reduction_op_;
public:
//
// Methods
//
/// Ctor
GemmOperationProfiler(Options const &options);
/// Destructor
virtual ~GemmOperationProfiler();
GemmProblem const& problem() const { return problem_; }
/// Prints usage statement for the math function
virtual void print_usage(std::ostream &out) const;
/// Prints examples
virtual void print_examples(std::ostream &out) const;
/// Extracts the problem dimensions
virtual Status initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Initializes workspace
virtual Status initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Verifies CUTLASS against references
virtual bool verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Measures performance results
virtual bool profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
protected:
/// Initializes the performance result
void initialize_result_(
PerformanceResult &result,
Options const &options,
library::GemmDescription const &operation_desc,
ProblemSpace const &problem_space);
/// Verifies CUTLASS against references
bool verify_with_cublas_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Verifies CUTLASS against host and device references
bool verify_with_reference_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem,
cutlass::library::NumericTypeID element_A,
cutlass::library::NumericTypeID element_B);
/// Method to profile a CUTLASS Operation
Status profile_cutlass_(
double &runtime,
Options const &options,
library::Operation const *operation,
void *arguments,
void *host_workspace,
void *device_workspace);
/// Initialize reduction problem dimensions and library::Operation
bool initialize_reduction_configuration_(
library::Operation const *operation,
ProblemSpace::Problem const &problem);
};
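// Example (illustrative): a command line exercising this profiler with the
// problem-space arguments declared in GemmProblem above might look like:
//
//   $ cutlass_profiler --operation=Gemm --m=4096 --n=4096 --k=4096 --alpha=1 --beta=0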
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/include/cutlass/profiler/gemm_operation_profiler.h/0 | {
"file_path": "tools/profiler/include/cutlass/profiler/gemm_operation_profiler.h",
"repo_id": "tools",
"token_count": 2472
} | 58 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Helper functions for mapping CUTLASS concepts to cuDNN.
*/
#if CUTLASS_ENABLE_CUDNN
#include <stdexcept>
#include "cutlass/profiler/cudnn_helpers.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Converts a cuDNN status to cutlass::Status
Status get_cutlass_status(cudnnStatus_t cudnn_status) {
if (cudnn_status == CUDNN_STATUS_SUCCESS) {
return Status::kSuccess;
}
else if (cudnn_status == CUDNN_STATUS_INVALID_VALUE) {
return Status::kErrorInvalidProblem;
}
if (cudnn_status == CUDNN_STATUS_NOT_SUPPORTED) {
return Status::kErrorNotSupported;
}
return Status::kErrorInternal;
}
/// Converts a cuDNN status to cutlass::profiler::Disposition
Disposition get_cutlass_disposition(cudnnStatus_t cudnn_status) {
if (cudnn_status == CUDNN_STATUS_INVALID_VALUE) {
return Disposition::kInvalidProblem;
}
else if (cudnn_status == CUDNN_STATUS_NOT_SUPPORTED) {
return Disposition::kNotSupported;
}
return Disposition::kFailed;
}
/// Converts a cudnnStatus_t to cutlass::Status and returns it if Status::kSuccess; otherwise throws an exception
Status checkCudnnErr(cudnnStatus_t cudnn_status) {
Status cutlass_status = get_cutlass_status(cudnn_status);
if(cutlass_status != Status::kSuccess) {
throw std::runtime_error("checkCudnnErr failed");
}
return cutlass_status;
}
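// Example (illustrative): wrapping a cuDNN API call so that failures surface as
// exceptions might look like the following (handle creation is shown only as a
// representative cuDNN call).
//
//   cudnnHandle_t handle;
//   Status status = checkCudnnErr(cudnnCreate(&handle));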
/// Maps a CUTLASS conv mode to a cuDNN cudnnConvolutionMode_t
bool get_cudnn_conv_mode(cudnnConvolutionMode_t &cudnn_conv_mode, conv::Mode conv_mode) {
switch (conv_mode) {
case conv::Mode::kCrossCorrelation:
cudnn_conv_mode = CUDNN_CROSS_CORRELATION;
return true;
case conv::Mode::kConvolution:
cudnn_conv_mode = CUDNN_CONVOLUTION;
return true;
default: break;
}
return false;
}
/// Maps a CUTLASS tensor layout to a cuDNN cudnnTensorFormat_t
bool get_cudnn_layout(cudnnTensorFormat_t &cudnn_layout, library::LayoutTypeID layout) {
switch (layout) {
// cudnn uses the same enum for TensorNC*HW along nDim (ConvDescription::conv_dim)
case library::LayoutTypeID::kTensorNCHW:
case library::LayoutTypeID::kTensorNCDHW:
cudnn_layout = CUDNN_TENSOR_NCHW;
return true;
case library::LayoutTypeID::kTensorNHWC:
case library::LayoutTypeID::kTensorNDHWC:
cudnn_layout = CUDNN_TENSOR_NHWC;
return true;
default: break;
}
return false;
}
/// Maps a CUTLASS numeric type to a cuDNN cudnnDataType_t
bool get_cudnn_datatype(cudnnDataType_t &cudnn_element_type, library::NumericTypeID element_type) {
switch (element_type) {
case library::NumericTypeID::kF16:
cudnn_element_type = CUDNN_DATA_HALF;
return true;
case library::NumericTypeID::kF32:
cudnn_element_type = CUDNN_DATA_FLOAT;
return true;
case library::NumericTypeID::kF64:
cudnn_element_type = CUDNN_DATA_DOUBLE;
return true;
case library::NumericTypeID::kS2:
break;
case library::NumericTypeID::kS4:
break;
case library::NumericTypeID::kS8:
cudnn_element_type = CUDNN_DATA_INT8;
return true;
case library::NumericTypeID::kS16:
break;
case library::NumericTypeID::kS32:
cudnn_element_type = CUDNN_DATA_INT32;
return true;
case library::NumericTypeID::kS64:
break;
case library::NumericTypeID::kU2:
break;
case library::NumericTypeID::kU4:
break;
case library::NumericTypeID::kU8:
cudnn_element_type = CUDNN_DATA_UINT8;
return true;
case library::NumericTypeID::kU16:
break;
case library::NumericTypeID::kU32:
break;
case library::NumericTypeID::kU64:
break;
case library::NumericTypeID::kB1:
break;
case library::NumericTypeID::kInvalid:
default:
break;
}
return false;
}
/// Maps CUTLASS math OpcodeClassID and MathOperationID to cuDNN math_type
bool get_cudnn_mathtype(cudnnMathType_t &cudnn_math_type, library::ConvDescription const &conv_desc) {
switch (conv_desc.tile_description.math_instruction.opcode_class) {
case library::OpcodeClassID::kTensorOp:
{
cudnn_math_type = CUDNN_TENSOR_OP_MATH;
library::MathOperationID math_op = conv_desc.tile_description.math_instruction.math_operation;
// Allow conversion on input data type for fast math operations
if (math_op == library::MathOperationID::kMultiplyAddFastF16 ||
math_op == library::MathOperationID::kMultiplyAddFastBF16)
{
cudnn_math_type = CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION;
}
return true;
}
case library::OpcodeClassID::kSimt:
#if (defined(CUDNN_VERSION) && CUDNN_VERSION <= 8000)
cudnn_math_type = CUDNN_DEFAULT_MATH;
#else
cudnn_math_type = CUDNN_FMA_MATH;
#endif
return true;
}
return false;
}
/// The cuDNN compute type is treated as float here (to work around a possible cuDNN issue)
float cast_cudnn_compute_type_to_float(library::NumericTypeID type, void const * src) {
switch (type) {
case library::NumericTypeID::kF16:
{
return float(*(static_cast<half_t const*>(src)));
}
case library::NumericTypeID::kF32:
{
return float(*(static_cast<float const*>(src)));
}
case library::NumericTypeID::kS32:
{
return float(*(static_cast<int const*>(src)));
}
default:
      throw std::runtime_error("Data type not handled in cast_cudnn_compute_type_to_float");
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuDNN can satisfy a particular Conv2d description
Status cudnn_satisfies(
library::ConvDescription const &desc,
library::Conv2dConfiguration const &configuration) {
auto const &a_tensor = desc.A;
auto const &b_tensor = desc.B;
auto const &c_tensor = desc.C;
auto const &math_instruction = desc.tile_description.math_instruction;
if(a_tensor.element != b_tensor.element) {
return Status::kErrorInvalidDataType;
}
//////////////////////// Convolution output dimensions p and q ///////////////////////
// Cutlass convolutions support arbitrary output dimensions and not constrained by //
// input, filter, padding, striding, dilation sizes. //
// cuDNN sets the output dimensions (p, q) using following equations: //
// //
// output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) //
// where; div_up(a, b) : (a - 1)/b + 1 //
// //
// Before launching cudnn verification or profiling check that output p and q //
// dimensions are cuDNN compliant. //
// //
// If user sets output p and q which do not follow above constraints, cutlass conv, //
// host reference, device reference can run. However, cudnn convolution returns //
// "Invalid problem" //
// //
///////////////////////////////////////////////////////////////////////////////////////
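  // Worked example (illustrative): with H = 224, pad_h = 3, R = 7, dilation_h = 1,
  // and stride_h = 2, cuDNN expects P = (224 + 2*3 - ((7 - 1)*1 + 1)) / 2 + 1 = 112;
  // any other user-specified P is rejected below as an invalid problem.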
// check conv output dimension p for cudnn
int cudnn_output_p =
(
(
configuration.problem_size.H +
2 * configuration.problem_size.pad_h -
((configuration.problem_size.R - 1) *
configuration.problem_size.dilation_h + 1)
) /
(configuration.problem_size.stride_h)
+ 1
);
if (cudnn_output_p != configuration.problem_size.P) {
return Status::kErrorInvalidProblem;
}
// check conv output dimension q for cudnn
int cudnn_output_q =
(
(
configuration.problem_size.W +
2 * configuration.problem_size.pad_w -
((configuration.problem_size.S - 1) *
configuration.problem_size.dilation_w + 1)
) /
(configuration.problem_size.stride_w)
+ 1
);
if (cudnn_output_q != configuration.problem_size.Q) {
return Status::kErrorInvalidProblem;
}
//////////////////////////////////////////////////////////////////////////////////////
  // conv operator with input=FP16, accumulator=FP32, output=FP32 datatypes (hss) is not supported in cuDNN
if (a_tensor.element == library::NumericTypeID::kF16 &&
b_tensor.element == library::NumericTypeID::kF16 &&
math_instruction.element_accumulator == library::NumericTypeID::kF32 &&
c_tensor.element == library::NumericTypeID::kF32
) {
return Status::kErrorNotSupported;
}
if (a_tensor.element == library::NumericTypeID::kBF16 ||
b_tensor.element == library::NumericTypeID::kBF16 ||
c_tensor.element == library::NumericTypeID::kBF16
) {
return Status::kErrorNotSupported;
}
// TF32 input not supported in cuDNN
if (a_tensor.element == library::NumericTypeID::kTF32 ||
b_tensor.element == library::NumericTypeID::kTF32 ||
c_tensor.element == library::NumericTypeID::kTF32
) {
return Status::kErrorNotSupported;
}
if (a_tensor.element == library::NumericTypeID::kS8 ||
b_tensor.element == library::NumericTypeID::kS8 ||
c_tensor.element == library::NumericTypeID::kS8
) {
return Status::kErrorNotSupported;
}
if (a_tensor.element == library::NumericTypeID::kU8 ||
b_tensor.element == library::NumericTypeID::kU8 ||
c_tensor.element == library::NumericTypeID::kU8
) {
return Status::kErrorNotSupported;
}
if (a_tensor.element == library::NumericTypeID::kS4 ||
b_tensor.element == library::NumericTypeID::kS4 ||
c_tensor.element == library::NumericTypeID::kS4
) {
return Status::kErrorNotSupported;
}
if (a_tensor.element == library::NumericTypeID::kU4 ||
b_tensor.element == library::NumericTypeID::kU4 ||
c_tensor.element == library::NumericTypeID::kU4
) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuDNN can satisfy a particular Conv3d description
Status cudnn_satisfies(
library::ConvDescription const &desc,
library::Conv3dConfiguration const &configuration) {
auto const &a_tensor = desc.A;
auto const &b_tensor = desc.B;
auto const &c_tensor = desc.C;
auto const &math_instruction = desc.tile_description.math_instruction;
if(a_tensor.element != b_tensor.element) {
return Status::kErrorInvalidDataType;
}
//////////////////////// Convolution output dimensions p and q ///////////////////////
// Cutlass convolutions support arbitrary output dimensions and not constrained by //
// input, filter, padding, striding, dilation sizes. //
// cuDNN sets the output dimensions (p, q) using following equations: //
// //
// output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) //
// where; div_up(a, b) : (a - 1)/b + 1 //
// //
// Before launching cudnn verification or profiling check that output p and q //
// dimensions are cuDNN compliant. //
// //
// If user sets output p and q which do not follow above constraints, cutlass conv, //
// host reference, device reference can run. However, cudnn convolution returns //
// "Invalid problem" //
// //
///////////////////////////////////////////////////////////////////////////////////////
// check conv output dimension z for cudnn
int cudnn_output_z =
(
(
configuration.problem_size.D +
2 * configuration.problem_size.pad_d -
((configuration.problem_size.T - 1) *
configuration.problem_size.dilation_d + 1)
) /
(configuration.problem_size.stride_d)
+ 1
);
if (cudnn_output_z != configuration.problem_size.Z) {
return Status::kErrorInvalidProblem;
}
// check conv output dimension p for cudnn
int cudnn_output_p =
(
(
configuration.problem_size.H +
2 * configuration.problem_size.pad_h -
((configuration.problem_size.R - 1) *
configuration.problem_size.dilation_h + 1)
) /
(configuration.problem_size.stride_h)
+ 1
);
if (cudnn_output_p != configuration.problem_size.P) {
return Status::kErrorInvalidProblem;
}
// check conv output dimension q for cudnn
int cudnn_output_q =
(
(
configuration.problem_size.W +
2 * configuration.problem_size.pad_w -
((configuration.problem_size.S - 1) *
configuration.problem_size.dilation_w + 1)
) /
(configuration.problem_size.stride_w)
+ 1
);
if (cudnn_output_q != configuration.problem_size.Q) {
return Status::kErrorInvalidProblem;
}
//////////////////////////////////////////////////////////////////////////////////////
  // conv operators with input, accumulator, output datatypes of (hss) are not supported
// in cuDNN
if (a_tensor.element == library::NumericTypeID::kF16 &&
b_tensor.element == library::NumericTypeID::kF16 &&
math_instruction.element_accumulator == library::NumericTypeID::kF32 &&
c_tensor.element == library::NumericTypeID::kF32
) {
return Status::kErrorNotSupported;
}
if (a_tensor.element == library::NumericTypeID::kBF16 ||
b_tensor.element == library::NumericTypeID::kBF16 ||
c_tensor.element == library::NumericTypeID::kBF16
) {
return Status::kErrorNotSupported;
}
if (a_tensor.element == library::NumericTypeID::kTF32 ||
b_tensor.element == library::NumericTypeID::kTF32 ||
c_tensor.element == library::NumericTypeID::kTF32
) {
return Status::kErrorNotSupported;
}
if (a_tensor.element == library::NumericTypeID::kS8 ||
b_tensor.element == library::NumericTypeID::kS8 ||
c_tensor.element == library::NumericTypeID::kS8
) {
return Status::kErrorNotSupported;
}
// S4 not supported in cuDNN
if (a_tensor.element == library::NumericTypeID::kS4 ||
b_tensor.element == library::NumericTypeID::kS4 ||
c_tensor.element == library::NumericTypeID::kS4
) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
#endif
| tools/profiler/src/cudnn_helpers.cpp/0 | {
"file_path": "tools/profiler/src/cudnn_helpers.cpp",
"repo_id": "tools",
"token_count": 6868
} | 59 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "cutlass/profiler/cublas_helpers.h"
#include "cutlass/profiler/symm_operation_profiler.h"
#include "cutlass/profiler/gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
SymmOperationProfiler::SymmOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kSymm,
{
{ArgumentTypeID::kEnumerated, {"symm_kind"}, "Variant of Symm (universal)"},
{ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the Symm problem space"},
{ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the Symm problem space"},
{ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
{ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"},
{ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
{ArgumentTypeID::kEnumerated, {"side_mode"}, "Side Mode for Symm kernel (left or right)"},
{ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for Symm kernel (lower or upper)"},
{ArgumentTypeID::kEnumerated, {"blas_mode"}, "Blas Mode for Symm kernel (symmetric or hermitian)"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of Symm computed in one batch"},
},
{ library::Provider::kCUBLAS }
) {
description_ = " Symmetric Matrix-Matrix Multiplication. D = alpha * A * B OR alpha * B * A + beta * C (where A is symmetric/hermitian)";
}
/// Destructor
SymmOperationProfiler::~SymmOperationProfiler() {
}
/// Prints usage statement for the math function
void SymmOperationProfiler::print_usage(std::ostream &out) const {
out << "Symm" << "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void SymmOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular problem size SYMM kernel:\n"
<< " $ cutlass_profiler --operation=Symm --blas_mode=symmetric --m=1024 --n=128\n\n"
<< "Profile a particular problem size HEMM kernel:\n"
<< " $ cutlass_profiler --operation=Symm --blas_mode=hermitian --m=1024 --n=128\n\n"
<< "Schmoo over problem size and beta:\n"
<< " $ cutlass_profiler --operation=Symm --m=1024:4096:256 --n=128:8192:128 --beta=0,1,2.5\n\n"
<< "Schmoo over accumulator types:\n"
<< " $ cutlass_profiler --operation=Symm --accumulator-type=f16,f32\n\n"
    << "Schmoo over side modes:\n"
<< " $ cutlass_profiler --operation=Symm --side_mode=left/right\n\n"
    << "Schmoo over fill modes:\n"
<< " $ cutlass_profiler --operation=Symm --fill_mode=lower/upper\n\n"
    << "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. For row major, use row or t):\n"
<< " $ cutlass_profiler --operation=Symm --A=f16:column or --A=*:row\n\n"
<< "Using various input value distribution:\n"
<< " $ cutlass_profiler --operation=Symm --dist=uniform,min:0,max:3\n"
<< " $ cutlass_profiler --operation=Symm --dist=gaussian,mean:0,stddev:3\n"
<< " $ cutlass_profiler --operation=Symm --dist=sequential,start:0,delta:1\n\n"
<< "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
<< " $ cutlass_profiler --operation=Symm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
<< "Test your changes to symm kernels with a quick functional test and save results in functional-test.csv:\n"
<< " $ cutlass_profiler --operation=Symm \\ \n"
<< " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --n=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
<< " --beta=0,1,2 --profiling-iterations=1 \\ \n"
<< " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
Status SymmOperationProfiler::SymmProblem::parse(
library::SymmDescription const &operation_desc,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!arg_as_int(this->m, "m", problem_space, problem)) {
// default value
this->m = 1024;
}
if (!arg_as_int(this->n, "n", problem_space, problem)) {
// default value
this->n = 1024;
}
if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
// default value
this->split_k_slices = 1;
}
if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
// default value
this->batch_count = 1;
}
if (this->split_k_slices > 1 && this->batch_count > 1) {
// At least one of these must be one
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(
this->alpha,
operation_desc.element_epilogue,
"alpha",
problem_space,
problem)) {
if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(
this->beta,
operation_desc.element_epilogue,
"beta",
problem_space,
problem)) {
if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
if (operation_desc.side_mode == SideMode::kLeft) {
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->m), int(this->m)}).front();
}
else if (operation_desc.side_mode == SideMode::kRight) {
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->n), int(this->n)}).front();
}
this->ldb = DeviceAllocation::get_packed_layout(
operation_desc.B.layout, {int(this->m), int(this->n)}).front();
this->ldc = DeviceAllocation::get_packed_layout(
operation_desc.C.layout, {int(this->m), int(this->n)}).front();
return Status::kSuccess;
}
/// Total number of bytes loaded
int64_t SymmOperationProfiler::SymmProblem::bytes(library::SymmDescription const &operation_desc) const {
int64_t bytes = 0;
// Input bytes read and Output bytes written for the gemm problem
// Half matrix including the diagonal will have (X*(X+1))/2 elements
if (operation_desc.side_mode == SideMode::kLeft) {
bytes =
int64_t(library::sizeof_bits(operation_desc.A.element) * m / 8) * (m + 1) / 2 +
int64_t(library::sizeof_bits(operation_desc.B.element) * m / 8) * n +
int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;
} else if (operation_desc.side_mode == SideMode::kRight) {
bytes =
int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * (n + 1) / 2 +
int64_t(library::sizeof_bits(operation_desc.B.element) * m / 8) * n +
int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;
}
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
// Output bytes read for the gemm problem for non-zero beta values
if (!is_beta_zero) {
bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;
}
bytes *= batch_count;
return bytes;
}
/// Total number of flops computed
int64_t SymmOperationProfiler::SymmProblem::flops(library::SymmDescription const &operation_desc) const {
// FLOPs for first TRMM kernel (with diagonal) = 2 * [ ( M * (M+1)/2 * N ) ] // Beta is zero
// FLOPs for second TRMM kernel (with diagonal) = 2 * [ ( M * (M-1)/2 * N ) ] // Beta is zero
// FLOPs = m*(m+1)*n [mma1] + m*(m-1)*n [mma2] + 2*m*n [epilogue]
// FLOPs = 2*m*n(m+1) for left side mode
// FLOPs can also be calculated to be same as GEMM with correct value for 'k' as below.
int64_t k = (operation_desc.side_mode == SideMode::kLeft) ? int64_t(m) : int64_t(n);
int64_t flops_ = (int64_t(m) * n * k + m * n) * 2;
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddComplexFastF32:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddGaussianComplex:
flops_ *= 3;
break;
default: break;
}
return flops_;
}
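// Worked example (illustrative): for the left side mode, k equals m, so the count
// above reduces to 2 * m * n * (m + 1); e.g. m = 512, n = 256 gives
// 2 * 512 * 256 * 513 = 134,479,872 FLOPs before any complex-valued scaling.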
/// Initializes a performance result
void SymmOperationProfiler::SymmProblem::initialize_result(
PerformanceResult &result,
library::SymmDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.arguments.resize(problem_space.rank());
set_argument(result, "symm_kind", problem_space, library::to_string(operation_desc.symm_kind));
set_argument(result, "A", problem_space,
std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
set_argument(result, "B", problem_space,
std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout));
set_argument(result, "C", problem_space,
std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
set_argument(result, "side_mode", problem_space, library::to_string(operation_desc.side_mode));
set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode));
set_argument(result, "blas_mode", problem_space, library::to_string(operation_desc.blas_mode));
set_argument(result, "m", problem_space, m);
set_argument(result, "n", problem_space, n);
set_argument(result, "split_k_slices", problem_space, split_k_slices);
set_argument(result, "batch_count", problem_space, batch_count);
set_argument(result, "alpha", problem_space,
library::lexical_cast(alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(beta, operation_desc.element_epilogue));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status SymmOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::SymmDescription const &operation_desc =
static_cast<library::SymmDescription const &>(operation->description());
if (operation_desc.symm_kind != library::SymmKind::kUniversal) {
return Status::kErrorInvalidProblem;
}
Status status = problem_.parse(operation_desc, problem_space, problem);
if (status != Status::kSuccess) {
return status;
}
symm_workspace_.configuration.problem_size.m() = int(problem_.m);
symm_workspace_.configuration.problem_size.n() = int(problem_.n);
symm_workspace_.configuration.problem_size.k() = (operation_desc.side_mode == SideMode::kLeft)
? int(problem_.m) : int(problem_.n);
symm_workspace_.configuration.lda = problem_.lda;
symm_workspace_.configuration.ldb = problem_.ldb;
symm_workspace_.configuration.ldc = problem_.ldc;
symm_workspace_.configuration.ldd = problem_.ldc;
//symm_workspace_.configuration.split_k_slices = int(problem_.split_k_slices);
symm_workspace_.configuration.batch_count = int(problem_.split_k_slices);
symm_workspace_.arguments.A = nullptr;
symm_workspace_.arguments.B = nullptr;
symm_workspace_.arguments.C = nullptr;
symm_workspace_.arguments.D = nullptr;
symm_workspace_.arguments.alpha = problem_.alpha.data();
symm_workspace_.arguments.beta = problem_.beta.data();
symm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&symm_workspace_.configuration, &symm_workspace_.arguments);
}
/// Initializes the performance result
void SymmOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::SymmDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
problem_.initialize_result(result, operation_desc, problem_space);
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
result.bytes = problem_.bytes(operation_desc);
result.flops = problem_.flops(operation_desc);
result.runtime = 0;
}
/// Initializes workspace
Status SymmOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::SymmDescription const &operation_desc =
static_cast<library::SymmDescription const &>(operation->description());
if (options.execution_mode != ExecutionMode::kDryRun) {
int seed_shift = 0;
if (operation_desc.side_mode == SideMode::kLeft) {
symm_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.m), int(problem_.m)},
{int(problem_.lda)},
1, // batch_count
seed_shift++
);
} else if (operation_desc.side_mode == SideMode::kRight) {
symm_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.lda)},
1, // batch_count
seed_shift++
);
}
symm_workspace_.B = device_context.allocate_tensor(
options,
"B",
operation_desc.B.element,
operation_desc.B.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldb)},
1, // batch_count
seed_shift++
);
symm_workspace_.C = device_context.allocate_tensor(
options,
"C",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)},
1, // batch_count
seed_shift++
);
symm_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)}
);
symm_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)}
);
symm_workspace_.Computed->copy_from_device(symm_workspace_.C->data());
symm_workspace_.Reference->copy_from_device(symm_workspace_.C->data());
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = operation->get_host_workspace_size(&symm_workspace_.configuration);
symm_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = operation->get_device_workspace_size(&symm_workspace_.configuration);
symm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = operation->initialize(
&symm_workspace_.configuration,
symm_workspace_.host_workspace.data(),
symm_workspace_.device_workspace.data());
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kSymm;
results_.back().disposition = Disposition::kNotRun;
for(auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool SymmOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
// Initialize structure containing Symm arguments
symm_workspace_.arguments.A = symm_workspace_.A->data();
symm_workspace_.arguments.B = symm_workspace_.B->data();
symm_workspace_.arguments.C = symm_workspace_.C->data();
symm_workspace_.arguments.D = symm_workspace_.Computed->data();
symm_workspace_.arguments.alpha = problem_.alpha.data();
symm_workspace_.arguments.beta = problem_.beta.data();
symm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Run the CUTLASS operation
//
results_.back().status = operation->run(
&symm_workspace_.arguments,
symm_workspace_.host_workspace.data(),
symm_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
  // CUTLASS op ran but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUBLAS
if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
// Guard against unsupported cases
auto const & symm_desc = static_cast<library::SymmDescription const &>(operation->description());
if (cublas_satisfies(symm_desc) == Status::kSuccess) {
// call cublas verification if supported
verify_with_cublas_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
else {
// set verification map for cublas to not supported
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUBLAS
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for(auto &m : results_.back().verification_map) {
if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if(is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// Return true means continue profiling
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool SymmOperationProfiler::verify_with_cublas_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
#if CUTLASS_ENABLE_CUBLAS
library::SymmDescription const &symm_desc =
static_cast<library::SymmDescription const &>(operation->description());
//
// Construct cuBLAS operators
//
CublasCreate handle;
cublasStatus_t status = handle.get_cublas_create_status();
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Initialize state
//
try {
//
// Construct dispatcher to cublas<t>Symm()
//
// Initialize structure containing Symm arguments
symm_workspace_.arguments.A = symm_workspace_.A->data();
symm_workspace_.arguments.B = symm_workspace_.B->data();
symm_workspace_.arguments.C = symm_workspace_.Reference->data();
symm_workspace_.arguments.D = symm_workspace_.Reference->data();
symm_workspace_.arguments.alpha = problem_.alpha.data();
symm_workspace_.arguments.beta = problem_.beta.data();
symm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
detail::cublasSymmDispatcher symm_op(
symm_desc,
symm_workspace_.configuration,
symm_workspace_.arguments
);
if (symm_op.status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
return true;
}
results_.back().status = Status::kSuccess;
status = symm_op(handle);
// Handle errors
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
options,
*symm_workspace_.Computed,
*symm_workspace_.Reference
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
symm_desc,
library::Provider::kCUTLASS,
library::Provider::kCUBLAS);
}
}
catch (...) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
}
#endif
// Return true means continue profiling
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
bool SymmOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing Symm arguments
symm_workspace_.arguments.A = symm_workspace_.A->data();
symm_workspace_.arguments.B = symm_workspace_.B->data();
symm_workspace_.arguments.C = symm_workspace_.C->data();
symm_workspace_.arguments.D = symm_workspace_.Computed->data();
symm_workspace_.arguments.alpha = problem_.alpha.data();
symm_workspace_.arguments.beta = problem_.beta.data();
symm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&symm_workspace_.arguments,
symm_workspace_.host_workspace.data(),
symm_workspace_.device_workspace.data()
);
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/src/symm_operation_profiler.cu/0 | {
"file_path": "tools/profiler/src/symm_operation_profiler.cu",
"repo_id": "tools",
"token_count": 9432
} | 60 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief utils code for device cutlass code
*/
#pragma once
#include <cuda_fp16.h>
#include <float.h>
#define FINAL_MASK 0xffffffff
struct half4 {
half x, y, z, w;
};
template<typename T, int NUM>
__inline__ __device__ T warpReduceSum(T* val)
{
#pragma unroll
for (int i = 0; i < NUM; i++) {
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1)
val[i] += __shfl_xor_sync(FINAL_MASK, val[i], mask, 32);
}
return (T)(0.0f);
}
template<typename T, int NUM>
__inline__ __device__ T blockReduceSum(T* val)
{
__shared__ T shared[NUM][33];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
warpReduceSum<T, NUM>(val);
if (lane == 0) {
#pragma unroll
for (int i = 0; i < NUM; i++) {
shared[i][wid] = val[i];
}
}
__syncthreads();
bool is_mask = threadIdx.x < (blockDim.x / 32.f);
#pragma unroll
for (int i = 0; i < NUM; i++) {
val[i] = is_mask ? shared[i][lane] : (T)(0.0f);
}
warpReduceSum<T, NUM>(val);
return (T)0.0f;
}
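// ---------------------------------------------------------------------------
// Illustrative usage sketch (added by the editor, not part of the original
// header). It shows how a kernel might drive blockReduceSum<T, NUM>: each
// thread accumulates a grid-strided partial sum, blockReduceSum combines the
// partials, and thread 0 writes one result per block. The kernel name and the
// guard macro are hypothetical; launch with a multiple-of-32 block size
// (e.g. 256 threads) so every warp participating in the shuffles is full.
#if defined(DEVICE_UTILS_REDUCTION_EXAMPLE)
__global__ void block_partial_sum_example(float const* in, float* out, int n)
{
    float val[1] = {0.0f};
    // Each thread accumulates a grid-strided slice of the input.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) {
        val[0] += in[i];
    }
    // Combine the per-thread partials across the whole thread block.
    blockReduceSum<float, 1>(val);
    // Thread 0 of the block holds the block-wide sum after the reduction.
    if (threadIdx.x == 0) {
        out[blockIdx.x] = val[0];
    }
}
#endif
// ---------------------------------------------------------------------------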
template<typename T, int NUM>
__inline__ __device__ T warpReduceMax(T* val)
{
#pragma unroll
for (int i = 0; i < NUM; i++) {
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1)
val[i] = max(val[i], __shfl_xor_sync(FINAL_MASK, val[i], mask, 32));
}
return (T)(0.0f);
}
template<typename T, int NUM>
__inline__ __device__ T blockReduceMax(T* val)
{
static __shared__ T shared[32][NUM];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
    warpReduceMax<T, NUM>(val); // get max in each warp
    if (lane == 0) // record in-warp max by warp idx
{
#pragma unroll
for (int i = 0; i < NUM; i++) {
shared[wid][i] = val[i];
}
}
__syncthreads();
    // Use blockDim.x / 32.f rather than blockDim.x >> 5 so the guard below
    // stays correct when blockDim.x is not a multiple of 32
    bool is_mask = threadIdx.x < (blockDim.x / 32.f);
#pragma unroll
for (int i = 0; i < NUM; i++) {
val[i] = is_mask ? shared[lane][i] : (T)(-FLT_MAX);
}
warpReduceMax<T, NUM>(val);
return (T)0.0f;
}
| tools/util/include/cutlass/util/device_utils.h/0 | {
"file_path": "tools/util/include/cutlass/util/device_utils.h",
"repo_id": "tools",
"token_count": 1513
} | 61 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for GEMM in host-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/arch/mma.h"
#include "cutlass/util/host_tensor.h"
namespace cutlass {
namespace reference {
namespace host {
template<typename Out, typename In>
struct CastIfScalar {
static Out cast(In in) {
return Out(in);
}
};
template<typename OutScalar, typename In>
struct CastIfScalar<cutlass::complex<OutScalar>, In> {
typedef cutlass::complex<OutScalar> Out;
static Out cast(In in) {
return Out(static_cast<OutScalar>(in));
}
};
template<typename OutScalar, typename InScalar>
struct CastIfScalar<cutlass::complex<OutScalar>, cutlass::complex<InScalar>> {
typedef cutlass::complex<OutScalar> Out;
typedef cutlass::complex<InScalar> In;
static Out cast(In in) {
return Out(in);
}
};
template<typename Out, typename In>
Out cast_if_scalar(In in) {
return CastIfScalar<Out, In>::cast(in);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_gemm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
// Note: batch is ignored.
int const M = problem_size.m();
int const N = problem_size.n();
int const K = problem_size.k();
  // Blocking is necessary to speed up the reference implementation
int const Mblock = 16;
int const Nblock = 16;
ConvertOp convert_op;
InnerProductOp inner_product_op;
for (int row_block = 0; row_block < M; row_block += Mblock) {
for (int col_block = 0; col_block < N; col_block += Nblock) {
ComputeType accum[Mblock][Nblock];
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
accum[i][j] = initial_accum;
}
}
for (int k_block = 0; k_block < K; ++k_block) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < M && col < N) {
ElementA a = tensor_a.at(MatrixCoord(row, k_block));
ElementB b = tensor_b.at(MatrixCoord(k_block, col));
ComputeType compute_a(cast_if_scalar<ComputeType>(a));
ComputeType compute_b(cast_if_scalar<ComputeType>(b));
accum[i][j] = inner_product_op(compute_a, compute_b, accum[i][j]);
}
}
}
}
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < M && col < N) {
tensor_d.at(coord) = convert_op(
alpha * ScalarType(accum[i][j]) +
beta * ScalarType(tensor_c.at(coord)));
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_gemm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum) {
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, InnerProductOp, ConvertOp>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_c,
initial_accum);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = cutlass::arch::OpMultiplyAdd
>
struct Gemm;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
ComputeType, arch::OpMultiplyAdd> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
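////////////////////////////////////////////////////////////////////////////////////////////////////
/// Illustrative usage sketch (added by the editor, not part of the original
/// header). It shows how host code might call the Gemm<> functor defined above
/// on HostTensor allocations. The function name, problem shape, and the
/// float/column-major choices are assumptions made only for this example.
inline void reference_gemm_usage_example() {
  int M = 128, N = 96, K = 64;
  cutlass::gemm::GemmCoord problem(M, N, K);
  cutlass::HostTensor<float, cutlass::layout::ColumnMajor> A(cutlass::MatrixCoord(M, K));
  cutlass::HostTensor<float, cutlass::layout::ColumnMajor> B(cutlass::MatrixCoord(K, N));
  cutlass::HostTensor<float, cutlass::layout::ColumnMajor> C(cutlass::MatrixCoord(M, N));
  cutlass::HostTensor<float, cutlass::layout::ColumnMajor> D(cutlass::MatrixCoord(M, N));
  // D = 1.0f * A * B + 0.0f * C, accumulated in float on the host.
  Gemm<float, cutlass::layout::ColumnMajor,
       float, cutlass::layout::ColumnMajor,
       float, cutlass::layout::ColumnMajor,
       float, float> gemm_op;
  gemm_op(problem, 1.0f, A.host_ref(), B.host_ref(), 0.0f, C.host_ref(), D.host_ref());
}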
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
ComputeType, arch::OpMultiplyAddFastBF16> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add-saturate
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
ComputeType, arch::OpMultiplyAddSaturate> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>,
NumericConverterClamp<ElementC, ScalarType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>,
NumericConverterClamp<ElementC, ScalarType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for XOR-popc
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
ComputeType, arch::OpXorPopc> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, xor_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, xor_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
/// Partial specialization for AND-popc
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
ComputeType, arch::OpAndPopc> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, and_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, and_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
ComputeType, arch::OpMultiplyAddFastF32> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Batched GEMM
//
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a batch of GEMMs over a set of matrices of common dimension.
//
// TensorRefCollection* is a type satisfying the TensorRefCollection concept.
//
template <
typename TensorRefCollectionA,
typename TensorRefCollectionB,
typename TensorRefCollectionC,
typename ScalarType,
typename AccumulatorType
>
void BatchedGemm(
gemm::GemmCoord problem_size,
int batch_count,
ScalarType alpha,
TensorRefCollectionA const& tensor_a,
TensorRefCollectionB const& tensor_b,
ScalarType beta,
TensorRefCollectionC &tensor_c,
AccumulatorType initial_accum) {
typename TensorRefCollectionA::ConstIterator tensor_a_it = tensor_a.begin();
typename TensorRefCollectionB::ConstIterator tensor_b_it = tensor_b.begin();
typename TensorRefCollectionC::ConstIterator tensor_c_it = tensor_c.begin();
for (int batch = 0;
batch < batch_count;
++batch, ++tensor_a_it, ++tensor_b_it, ++tensor_c_it) {
Gemm<typename TensorRefCollectionA::Element,
typename TensorRefCollectionA::Layout,
typename TensorRefCollectionB::Element,
typename TensorRefCollectionB::Layout,
typename TensorRefCollectionC::Element,
typename TensorRefCollectionC::Layout,
typename TensorRefCollectionC::Element,
typename TensorRefCollectionC::Element>
gemm;
gemm(problem_size, alpha, *tensor_a_it, *tensor_b_it, beta, *tensor_c_it,
initial_accum);
}
}
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
//
// TensorRefCollection* is a type satisfying the TensorRefCollection concept.
//
template <
typename TensorRefCollectionA,
typename TensorRefCollectionB,
typename TensorRefCollectionC,
typename ScalarType,
typename AccumulatorType
>
void BatchedGemm(
gemm::GemmCoord problem_size,
int batch_count,
ScalarType alpha,
TensorRefCollectionA const& tensor_a,
TensorRefCollectionB const& tensor_b,
ScalarType beta,
TensorRefCollectionC &tensor_c) {
BatchedGemm(problem_size, batch_count, alpha, tensor_a, tensor_b, beta, tensor_c, ScalarType(0));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/host/gemm.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/gemm.h",
"repo_id": "tools",
"token_count": 8247
} | 62 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Planar Complex GEMM
This example demonstrates the CUTLASS Library's exposure of planar complex GEMM kernels supporting
the batched strided mode.
These kernels represent complex matrices by storing the real and imaginary parts of the matrix in
disjoint regions in memory. These real-valued matrices are stored using existing cuBLAS layouts
as either column-major or row-major layouts with a single leading dimension indicating the stride
between columns or rows.
The CUTLASS Library collects multiple template instantiations in a data structure and offers
a BLAS-like dispatch API to invoke the appropriate kernel on the Volta or Turing architectures.
CUTLASS decouples matrix layout from complex transformation, so four possible transformations
are possible on the A and B operands:
n: column-major
c: column-major complex conjugate
t: row-major
h: row-major complex conjugate
The CUTLASS Library contains many kernel instances specialized for architecture, data type, tile
size, and alignment. This can result in long compile times.
To build strictly the planar complex kernels needed for general application, execute the following
CMake command in an empty build directory.
$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" \
-DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_*gemm_planar_complex
This builds all planar complex GEMM variants for Volta and Turing architectures.
To build strictly the kernels needed for this example, an even narrower filter string may be
specified as follows. This only builds planar complex GEMMs targeting Tensor Cores for
the 'CN' layout configuration (conjugate A operand with both A and B as column-major).
$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" \
-DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_f16_s*gemm_planar_complex_f16*cn
$ make 10_planar_complex
$ ./examples/10_planar_complex/10_planar_complex --m=2048 --n=1024 --k=512 --batch=10
*/
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor_planar_complex.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/device/gemm_planar_complex.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/library/handle.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
cutlass::complex<float> alpha;
cutlass::complex<float> beta;
bool reference_check;
int iterations;
Options():
help(false),
problem_size({1024, 1024, 1024}),
batch_count(1),
reference_check(true),
iterations(20),
alpha(1),
beta() { }
bool valid() {
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("batch", batch_count);
cmd.get_cmd_line_argument("alpha", alpha.real());
cmd.get_cmd_line_argument("alpha_i", alpha.imag());
cmd.get_cmd_line_argument("beta", beta.real());
cmd.get_cmd_line_argument("beta_i", beta.imag());
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "10_planar_complex example\n\n"
<< " This example uses the CUTLASS Library to execute Planar Complex GEMM computations.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m=<int> GEMM M dimension\n"
<< " --n=<int> GEMM N dimension\n"
<< " --k=<int> GEMM K dimension\n"
<< " --batch=<int> Number of GEMM operations executed in one batch\n"
<< " --alpha=<f32> Epilogue scalar alpha (real part)\n"
<< " --alpha_i=<f32> Epilogue scalar alpha (imaginary part)\n"
<< " --beta=<f32> Epilogue scalar beta (real part)\n\n"
<< " --beta_i=<f32> Epilogue scalar beta (imaginary part)\n\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/10_planar_complex/10_planar_complex --batch=7 --m=1024 --n=512 --k=1024 \\\n"
<< " --alpha=2 --alpha_i=-2 --beta=0.707 --beta_i=-.707\n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = problem_size.product() * batch_count * 4;
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
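  /// Worked example (illustrative comment added for clarity): m = n = k = 1024
  /// with batch_count = 10 gives fmas = 1024^3 * 10 * 4 ~ 4.29e10 real
  /// multiply-adds, i.e. ~85.9 GFLOP total; at a runtime of 10 ms this reports
  /// roughly 8590 GFLOP/s.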
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Performance test environment for planar complex
class TestbedPlanarComplex {
public:
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = cutlass::half_t;
using LayoutC = cutlass::layout::ColumnMajor;
using ElementCompute = float;
using ElementAccumulator = float;
//
// Data members
//
cutlass::library::Handle handle;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
cutlass::DeviceAllocation<ElementA> tensor_A;
cutlass::DeviceAllocation<ElementB> tensor_B;
cutlass::DeviceAllocation<ElementC> tensor_C;
cutlass::DeviceAllocation<ElementC> tensor_D;
cutlass::DeviceAllocation<ElementC> tensor_D_ref;
//
// Methods
//
TestbedPlanarComplex(
Options const &options
):
problem_size(options.problem_size), batch_count(options.batch_count) {
// Allocate device memory for batched strided GEMM
tensor_A.reset(int64_t(problem_size.m()) * problem_size.k() * batch_count * 2);
tensor_B.reset(int64_t(problem_size.k()) * problem_size.n() * batch_count * 2);
tensor_C.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2);
tensor_D.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2);
tensor_D_ref.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2);
}
void initialize() {
uint64_t seed = 1073;
// Use small integers to simplify correctness checking
int scope_max = 6;
int scope_min = -6;
cutlass::reference::device::BlockFillRandomUniform(
tensor_A.get(), tensor_A.size(), seed, ElementA(scope_max), ElementA(scope_min), 0);
cutlass::reference::device::BlockFillRandomUniform(
tensor_B.get(), tensor_B.size(), seed * 2019, ElementB(scope_max), ElementB(scope_min), 0);
cutlass::reference::device::BlockFillRandomUniform(
tensor_C.get(), tensor_C.size(), seed * 2020, ElementC(scope_max), ElementC(scope_min), 0);
}
Result profile(Options const &options) {
Result result;
initialize();
ElementA *ptr_A = tensor_A.get();
ElementB *ptr_B = tensor_B.get();
ElementC *ptr_C = tensor_C.get();
ElementC *ptr_D = tensor_D.get();
int64_t batch_stride_A = int64_t(problem_size.m()) * problem_size.k() * 2;
int64_t batch_stride_B = int64_t(problem_size.k()) * problem_size.n() * 2;
int64_t batch_stride_C = int64_t(problem_size.m()) * problem_size.n() * 2;
int64_t batch_stride_D = int64_t(problem_size.m()) * problem_size.n() * 2;
typename LayoutA::Stride::Index lda = LayoutA::packed({problem_size.m(), problem_size.k()}).stride(0);
typename LayoutB::Stride::Index ldb = LayoutB::packed({problem_size.k(), problem_size.n()}).stride(0);
typename LayoutC::Stride::Index ldc = LayoutC::packed({problem_size.m(), problem_size.n()}).stride(0);
typename LayoutC::Stride::Index ldd = LayoutC::packed({problem_size.m(), problem_size.n()}).stride(0);
int64_t imag_stride_A = int64_t(problem_size.m()) * problem_size.k();
int64_t imag_stride_B = int64_t(problem_size.k()) * problem_size.n();
int64_t imag_stride_C = int64_t(problem_size.m()) * problem_size.n();
int64_t imag_stride_D = int64_t(problem_size.m()) * problem_size.n();
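    //
    // Planar complex addressing sketch (illustrative comment added for clarity):
    // with column-major A and leading dimension lda, the real part of A(i, k) in
    // batch b lives at ptr_A + b * batch_stride_A + i + k * lda, and the imaginary
    // part at the same offset plus imag_stride_A. The same pattern applies to B,
    // C and D with their respective leading dimensions and strides.
    //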
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
      return result;
}
}
// Record an event at the start of a series of GEMMs
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
//
// Execute the planar complex GEMM kernel via the CUTLASS Library's
// dispatch routines.
//
// Note, for planar complex GEMM kernels, all numeric type arguments
// specify the data type of the base real types. These are understood to
// apply to planar complex representations of matrices in memory and to complex<T>
// structures for scalars.
//
// See tools/library/include/cutlass/library/handle.h for more details.
//
result.status = handle.gemm_planar_complex(
problem_size.m(), // GEMM M dimension
problem_size.n(), // GEMM N dimension
problem_size.k(), // GEMM K dimension
cutlass::library::NumericTypeID::kF32, // Base data type of complex-valued accumulation
cutlass::library::NumericTypeID::kF32, // Base data type of complex-valued alpha/beta scalars
&options.alpha, // Pointer to alpha scalar, of type complex<T>
cutlass::library::NumericTypeID::kF16, // Base data type of complex-valued A matrix
cutlass::library::LayoutTypeID::kColumnMajor, // Layout of A matrix
cutlass::library::ComplexTransform::kConjugate, // Complex transformation on A matrix operand
ptr_A, // Pointer to real part of A matrix
ptr_A + imag_stride_A, // Pointer to imaginary part of A matrix
lda, // Leading dimension of real part of A matrix
lda, // Leading dimension of imaginary part of A matrix
cutlass::library::NumericTypeID::kF16, // Base data type of complex-valued B matrix
cutlass::library::LayoutTypeID::kColumnMajor, // Layout of B matrix
cutlass::library::ComplexTransform::kNone, // Complex transformation on B matrix operand
ptr_B, // Pointer to real part of B matrix
ptr_B + imag_stride_B, // Pointer to imaginary part of B matrix
ldb, // Leading dimension of real part of B matrix
ldb, // Leading dimension of imaginary part of B matrix
&options.beta, // Pointer to beta scalar, of type complex<T>
cutlass::library::NumericTypeID::kF16, // Base data type of complex valued C and D matrices
ptr_C, // Pointer to real part of C matrix
ptr_C + imag_stride_C, // Pointer to imaginary part of C matrix
ldc, // Leading dimension of real part of C matrix
ldc, // Leading dimension of imaginary part of C matrix
ptr_D, // Pointer to real part of D matrix
ptr_D + imag_stride_D, // Pointer to imaginary part of D matrix
ldd, // Leading dimension of real part of D matrix
ldd, // Leading dimension of imaginary part of D matrix
batch_count, // Number of batched elements
batch_stride_A, // Stride between batches of real parts of A matrix
batch_stride_A, // Stride between batches of imaginary parts of A matrix
batch_stride_B, // Stride between batches of real parts of B matrix
batch_stride_B, // Stride between batches of imaginary parts of B matrix
batch_stride_C, // Stride between batches of real parts of C matrix
batch_stride_C, // Stride between batches of imaginary parts of C matrix
batch_stride_D, // Stride between batches of real parts of D matrix
batch_stride_D // Stride between batches of imaginary parts of D matrix
);
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "CUTLASS internal error - configuration not supported" << std::endl;
return result;
}
}
//
// Stop profiling loop
//
// Record an event when the GEMMs are complete
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
if (handle.get_last_operation()) {
std::cout << "Recently executed '" << handle.get_last_operation()->description().name << "'" << std::endl;
}
//
// Compute reference in device code
//
if (options.reference_check) {
result.passed = true;
for (int64_t idx = 0; result.passed && idx < int64_t(batch_count); ++idx) {
cutlass::reference::device::GemmPlanarComplex<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator
>(
problem_size,
options.alpha,
{tensor_A.get() + idx * batch_stride_A, lda, imag_stride_A},
cutlass::ComplexTransform::kConjugate,
{tensor_B.get() + idx * batch_stride_B, ldb, imag_stride_B},
cutlass::ComplexTransform::kNone,
options.beta,
{tensor_C.get() + idx * batch_stride_C, ldc, imag_stride_C},
{tensor_D_ref.get() + idx * batch_stride_D, ldd, imag_stride_D}
);
ElementC epsilon = 0.1_hf;
ElementC nonzero_floor = 0.1_hf;
result.passed = cutlass::reference::device::BlockCompareRelativelyEqual(
tensor_D.get() + idx * batch_stride_D,
tensor_D_ref.get() + idx * batch_stride_D,
batch_stride_D,
epsilon,
nonzero_floor
);
}
if (result.passed) {
std::cout << "Reference check passed." << std::endl;
}
else {
std::cerr << "Error - reference check failed." << std::endl;
}
}
std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " GFLOPs: " << result.gflops << std::endl;
return result;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
//
// This example uses mma.sync to directly access Tensor Cores to achieve peak performance.
//
// Volta Tensor Core operations are first available in CUDA 10.1 Toolkit.
//
// Turing Tensor Core operations are first available in CUDA 10.2 Toolkit.
//
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (props.major < 7) {
std::cerr << "Volta Tensor Core operations must be run on a machine with compute capability at least 70."
<< std::endl;
// Returning zero so this test passes on older architectures even though its actions are a no-op.
return 0;
}
else if (props.major == 7 && props.minor <= 2) {
//
// If running on the Volta architecture, at least CUDA 10.1 Toolkit is required to run this example.
//
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1))) {
std::cerr << "Volta Tensor Core operations must be compiled with CUDA 10.1 Toolkit or later." << std::endl;
// Returning zero so this test passes on older Toolkits even though its actions are a no-op.
return 0;
}
}
else if (props.major == 7 && props.minor >= 5) {
//
// If running on the Turing architecture, at least CUDA 10.2 Toolkit is required to run this example.
//
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
// Returning zero so this test passes on older Toolkits even though its actions are a no-op.
return 0;
}
}
else {
// NVIDIA Ampere Architecture GPUs (SM80 and later) are fully supported on CUDA 11 Toolkit and beyond.
//
// fall through
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
TestbedPlanarComplex testbed(options);
Result result = testbed.profile(options);
return result.passed ? 0 : -1;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/10_planar_complex/planar_complex.cu/0 | {
"file_path": "examples/10_planar_complex/planar_complex.cu",
"repo_id": "examples",
"token_count": 8729
} | 0 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape0_,
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape1_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy0_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy1_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class B2bMmaBase {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape0 = Shape0_;
using Shape1 = Shape1_;
///< Policy describing tuning details
using Policy0 = Policy0_;
using Policy1 = Policy1_;
//
// Dependent types
//
/// Warp-level Mma
using Operator0 = typename Policy0::Operator;
using Operator1 = typename Policy1::Operator;
/// Shape describing the overall GEMM computed from shared memory
/// by each warp.
using WarpGemm0 = typename Policy0::Operator::Shape;
using WarpGemm1 = typename Policy1::Operator::Shape;
/// Shape describing the number of warps filling the CTA
using WarpCount0 = GemmShape<Shape0::kM / WarpGemm0::kM,
Shape0::kN / WarpGemm0::kN,
Shape0::kK / WarpGemm0::kK>;
using WarpCount1 = GemmShape<Shape1::kM / WarpGemm1::kM,
Shape1::kN / WarpGemm1::kN,
Shape1::kK / WarpGemm1::kK>;
/// Number of warp-level GEMM operations
static int const kWarpGemmIterations0 =
(WarpGemm0::kK / Operator0::Policy::MmaShape::kK);
static int const kWarpGemmIterations1 =
(WarpGemm1::kK / Operator1::Policy::MmaShape::kK);
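/// As a concrete illustration (hypothetical shapes, not taken from this example):
/// with Shape0 = GemmShape<128, 128, 32>, WarpGemm0 = GemmShape<64, 64, 32>, and
/// Operator0::Policy::MmaShape::kK = 16, WarpCount0 is <2, 2, 1> (4 warps per CTA)
/// and kWarpGemmIterations0 = 32 / 16 = 2 warp-level MMA iterations per mainloop stage.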
/// Number of stages
static int const kStages = Stages;
//
// Nested structs
//
/// Shared storage object needed by threadblock-scoped GEMM
template<
typename Shape_,
typename Policy_
>
class SharedStorage {
public:
//
// Type definitions
//
using Shape = Shape_;
using Policy = Policy_;
using Operator = typename Policy::Operator;
/// Tensor reference to the A operand
using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>;
/// Tensor reference to the B operand
using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;
/// Shape of the A matrix operand in shared memory
using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow,
Shape::kK * kStages +
Policy::SmemPaddingA::kColumn>;
/// Shape of the B matrix operand in shared memory
using ShapeB =
MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow,
Shape::kN + Policy::SmemPaddingB::kColumn>;
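/// For instance (hypothetical values, no smem padding): with Shape = GemmShape<128, 128, 32>
/// and kStages = 2, ShapeA is 128 x 64 and ShapeB is 64 x 128, i.e. each operand keeps
/// kStages K-slices of its threadblock tile resident in shared memory.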
public:
//
// Data members
//
/// Buffer for A operand
AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A;
/// Buffer for B operand
AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;
public:
//
// Methods
//
/// Returns a layout object for the A matrix
CUTLASS_DEVICE
static typename Operator::LayoutA LayoutA() {
return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});
}
/// Returns a layout object for the B matrix
CUTLASS_HOST_DEVICE
static typename Operator::LayoutB LayoutB() {
return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});
}
/// Returns a TensorRef to the A operand
CUTLASS_HOST_DEVICE
TensorRefA operand_A_ref() {
return TensorRefA{operand_A.data(), LayoutA()};
}
/// Returns a TensorRef to the B operand
CUTLASS_HOST_DEVICE
TensorRefB operand_B_ref() {
return TensorRefB{operand_B.data(), LayoutB()};
}
};
using SharedStorage0 = SharedStorage<Shape0, Policy0>;
using SharedStorage1 = SharedStorage<Shape1, Policy1>;
union B2bMmaSharedStorage {
SharedStorage0 shared_storage0;
SharedStorage1 shared_storage1;
};
protected:
//
// Data members
//
/// Iterator to load a warp-scoped tile of A0 operand from shared memory
typename Operator0::IteratorA warp_tile_iterator_A0_;
/// Iterator to load a warp-scoped tile of B0 operand from shared memory
typename Operator0::IteratorB warp_tile_iterator_B0_;
/// Iterator to load a warp-scoped tile of B1 operand from shared memory
typename Operator1::IteratorB warp_tile_iterator_B1_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
B2bMmaBase(
///< Shared storage needed for internal use by threadblock-scoped GEMM
B2bMmaSharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
warp_tile_iterator_A0_(shared_storage.shared_storage0.operand_A_ref(), lane_idx),
warp_tile_iterator_B0_(shared_storage.shared_storage0.operand_B_ref(), lane_idx),
warp_tile_iterator_B1_(shared_storage.shared_storage1.operand_B_ref(), lane_idx) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/threadblock/b2b_mma_base.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/threadblock/b2b_mma_base.h",
"repo_id": "examples",
"token_count": 2697
} | 1 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to run CUTLASS's convolution kernels
based on the Implicit GEMM algorithm, that use the Tensor Cores
on an NVIDIA Ampere GPU.
Writing a single high-performance convolution kernel is hard enough,
let alone writing kernels that perform well for multiple problem sizes
and use good software abstractions.
CUTLASS provides simplified abstractions
to compose multiple sections of a convolution kernel.
When used properly, the kernels can reach peak GPU performance.
CUTLASS divides a kernel into hierarchical composable sections
for each level of the GPU hardware hierarchy:
thread, warp, and threadblock.
Each section computes on its own tile shape,
with each higher level's tile shape
being composed from lower-level tile shapes.
Multiple thread tiles (the tile shape each thread computes)
can be used to form warp tiles (the tile shape each warp computes),
and multiple warp tiles can be used to compute threadblock tiles
(the tile shape computed by a threadblock).
In this example, we split variable initialization into two parts.
1. Setting up data properties: describes how tensors are laid out in the memory
and how the kernel can view them (logical to physical mapping)
2. Setting up computation properties: describes how the above tensors
will be used to compute the output of convolution
We begin by setting up the data types
of all the input and output elements of a convolution.
A convolution computes
C = alpha * Conv2dFprop(A, B) + beta * C,
so we set up data types for the input tensor A,
weights tensor B, output tensor C,
and the scaling factors alpha and beta.
CUTLASS divides the convolution into two parts:
the "mainloop" that computes X = Conv2dFprop(A, B),
and the "epilogue" that computes C = alpha * X + beta * C.
The epilogue is an element-wise operation on X and C.
In this case, it is a linear combination,
but other epilogues are possible.
In this example, we want
* the scaling factors alpha and beta to be float,
* the elements of A and B to be cutlass::half_t
(a 16-bit floating-point type),
* the elements of C to be float, and
* intermediate sums to be accumulated in float.
We convey this to the CUTLASS kernel
by setting the following template parameters.
* alpha and beta: ElementComputeEpilogue = float
* Elements of input tensor A: ElementInputA = cutlass::half_t
* Elements of input tensor B: ElementInputB = cutlass::half_t
* Elements of output tensor C: ElementOutput = float
* Accumulation type: ElementAccumulator = float
Next, we describe the layout of the input and output tensors.
We convey this to the CUTLASS kernel
by setting the following template parameters.
* Layout of input tensor A: LayoutInputA = TensorNHWC
* Layout of input tensor B: LayoutInputB = TensorNHWC
* Layout of output tensor C: LayoutOutput = TensorNHWC
After that, we set up rules to compute the epilogue.
The epilogue in this case is a simple linear combination
C = alpha * X + beta * C.
Thus, we set the kernel's template parameter EpilogueOp
to LinearCombination. LinearCombination itself
has template parameters:
* the element type of the output tensor (ElementOutput),
* the number of elements per vectorized memory access (128 / sizeof_bits<ElementOutput> = 4 here),
* the data type of the accumulator (ElementAccumulator),
* and the data type used to compute the linear combination
(ElementComputeEpilogue).
We then define the tile shapes
that each level of the computation uses.
We define these as types that encode the tile shapes
as compile-time integer values.
Each shape expresses the dimensions M x N x K.
Here, the letters refer to the dimensions
of a matrix-matrix multiply.
* ThreadblockShape defines the threadblock tile shape
as 128 x 128 x 64.
* WarpShape defines the warp tile shape as 64 x 64 x 64.
* InstructionShape defines the MMA
(matrix multiply-accumulate) operation shape
as 16 x 8 x 16.
These types become template arguments
of the kernel properties type
cutlass::conv::kernel::DefaultConv2dFprop.
The kernel uses these shapes to deduce
the number of threads needed per threadblock,
the required amount of shared memory,
the internal layouts needed to access
shared memory without bank conflicts,
and many other properties that the kernel needs
for good performance.
CUTLASS deduces all these properties automatically,
so that users don't have to.
DefaultConv2dFprop accepts other template parameters
that describe things like the target CUDA SM architecture.
CUTLASS also supports multiple MMA pipelines in a threadblock.
An MMA pipeline constitutes the whole process
of loading input data from global memory to shared memory,
loading data from shared memory to registers,
doing matrix multiplication,
and storing the result to global memory.
The below flow sequence shows a typical MMA multistage pipeline
(see include/cutlass/conv/threadblock/implicit_gemm_multistage.h).
tensor in global memory
--cp_async-->
tile in shared memory
--smem loads-->
registers
--mma-->
registers
--global stores-->
output to global memory
On NVIDIA Ampere, the kernel uses `cp_async`
to build a multistage software pipeline.
This helps it better hide latency.
At this point, we can define the actual CUTLASS kernel type
as the alias ImplicitGemm, a specialization of
cutlass::conv::device::ImplicitGemmConvolution.
The latter accepts the kernel properties type alias
Conv2dFpropKernel as its one template argument.
This example then sets up a test problem
and arguments to the kernel.
We use CUTLASS utilities to allocate
the input and output tensors
and fill them with sample input data.
We then create the kernel arguments
as an instance of ImplicitGemm::Arguments.
The arguments include
the problem size (N = 1, H = 64, W = 64, C = 128),
filter size (K = 64, R = 3, S = 3, C = 128),
padding, strides, dilation, tensors, alpha, beta,
and the split k-dimension factor.
We also query CUTLASS if the kernel we instantiated
requires any memory for scratch space.
If yes, we reserve scratch space and pass it along
with other arguments to initialize the CUTLASS kernel.
After launching the CUTLASS kernel, this example runs
a reference convolution kernel (from CUTLASS utilities)
to check correctness.
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// Data types for input and output tensors
// and computation between elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// Whether to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// SM architecture number
using SmArch = cutlass::arch::Sm80;
// Threadblock tile shape
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
// Warp tile shape
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
// MMA (Tensor Core instruction, in this case) tile shape
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
// How the kernel schedules threadblocks
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipeline stages to use
constexpr int NumStages = 3;
// Which iterator algorithm to use: Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// Is the output packed or strided
// Use kStrided if the output is strided
static cutlass::conv::StrideSupport const OutputStride = cutlass::conv::StrideSupport::kUnity;
// The epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
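// A quick check of the vector width above: ElementOutput is float (32 bits), so
// 128 / cutlass::sizeof_bits<ElementOutput>::value = 128 / 32 = 4 elements per access.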
// Kernel properties type
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm,
OutputStride
>::Kernel;
// Type of the actual kernel
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
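// As a rough sketch of the implicit GEMM view of Conv2dFprop (assuming the usual
// NHWC fprop mapping GEMM_M = N*P*Q, GEMM_N = K, GEMM_K = C*R*S): this example's
// default problem (input 1x32x32x32, filter 32x3x3x32, padding 1, stride 1)
// corresponds to a 1024 x 32 x 288 GEMM.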
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
bool reference_check;
bool measure_performance;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool benchmark;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
reference_check(false),
measure_performance(true),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) { }
// Verify that the problem size is compatible with CUTLASS's convolution implementation
bool valid() {
//
// CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 8 elements.
//
int const kAlignment = 8;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Update input and filter sizes
void update(
cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size) {
this->input_size = input_size;
this->filter_size = filter_size;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
// Parse command-line arguments
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.c() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Print an explanation of the command-line arguments
std::ostream & print_usage(std::ostream &out) const {
out << "16_ampere_tensorop_conv2dfprop example\n\n"
<< " This example uses Ampere's Tensor Core operators on F16 data types\n"
<< " to compute forward convolution on tensors of layout NHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n=<int> Input tensor extent N\n"
<< " --h=<int> Input tensor extent H\n"
<< " --w=<int> Input tensor extent W\n"
<< " --c=<int> Input tensor extent C\n"
<< " --k=<int> Filter extent K\n"
<< " --r=<int> Filter extent R\n"
<< " --s=<int> Filter extent S\n\n"
<< " --alpha=<float> Epilogue scalar alpha\n"
<< " --beta=<float> Epilogue scalar beta\n\n"
<< " --ref-check If set (true), reference check on the host is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag=<string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/16_ampere_tensorop_conv2dfprop/16_ampere_tensorop_conv2dfprop --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n"
<< "$ ./examples/16_ampere_tensorop_conv2dfprop/16_ampere_tensorop_conv2dfprop --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
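/// Sanity check with this example's defaults (input 1x32x32x32, filter 32x3x3x32,
/// padding 1x1x1x1, stride 1x1): P = Q = (32 + 1 + 1 - 3) / 1 + 1 = 32, so the
/// output tensor extent is 1x32x32x32 (NPQK).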
/// Compute performance in Gflop/s
///
/// Gflop/s stands for billions (10^9) of
/// floating-point operations per second (Gflop/s).
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS
int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
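/// For the same default problem, NPQK = 1 * 32 * 32 * 32 = 32768 and CRS = 3 * 3 * 32 = 288,
/// so a single convolution performs 2 * 32768 * 288 = 18,874,368 flops (~0.019 GFLOP).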
};
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream& print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/// Runs one benchmark
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size);
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniformly distributed random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(7),
ElementInputA(-8),
0);
// Fill tensor B on host with uniformly distributed random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(7),
ElementInputB(-8),
0);
// Fill tensor C on host with uniformly distributed random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(7),
ElementOutput(-8),
0);
// Fill tensor D on host with zeros
cutlass::reference::host::TensorFill(
tensor_d.host_view());
// Fill tensor D for reference on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices
);
// Construct ImplicitGemm::Argument structure with conv2d
// problem size, data pointers, and epilogue values
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_d.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemm implicit_gemm_op;
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on host...\n";
// Compute with reference implementation
cutlass::reference::host::Conv2dFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator
>(
problem_size,
tensor_a.host_ref(),
tensor_b.host_ref(),
tensor_c.host_ref(),
tensor_ref_d.host_ref(),
options.alpha,
options.beta
);
// Check if CUTLASS kernel and reference kernel produced the same output
tensor_d.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
}
else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
}
else {
result.reference_check = cutlass::Status::kInvalid;
}
if (options.save_workspace) {
std::stringstream ss;
ss << "16_ampere_workspace_conv2dfprop_"
<< options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
<< "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
<< ".dat";
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a.host_view() << "\n\n"
<< "Filters = \n" << tensor_b.host_view() << "\n\n";
if (options.reference_check) {
output_workspace << "Reference = \n" << tensor_ref_d.host_view() << "\n\n";
}
output_workspace << "Computed = \n" << tensor_d.host_view() << std::endl;
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
//
// Performance measurement
//
if (options.measure_performance) {
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device.
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime.
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average run time and floating-point throughput (Gflop/s).
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
}
return result;
}
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major >= 8)) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.benchmark) {
// Benchmark several layers
int batch_sizes[] = {1, 32, 64, 128, 256, 512};
struct Benchmark {
int h, w, c, k, r, s;
} layers[] = {
{56, 56, 64, 256, 1, 1},
{56, 56, 64, 64, 1, 1},
{56, 56, 64, 64, 3, 3},
{56, 56, 256, 64, 1, 1},
{56, 56, 256, 512, 1, 1},
{56, 56, 256, 128, 1, 1},
{28, 28, 128, 128, 3, 3},
{28, 28, 128, 512, 1, 1},
{28, 28, 512, 128, 1, 1},
{28, 28, 512, 1024, 1, 1},
{28, 28, 512, 256, 1, 1},
{14, 14, 256, 256, 3, 3},
{14, 14, 256, 1024, 1, 1},
{14, 14, 1024, 256, 1, 1},
{14, 14, 1024, 2048, 1, 1},
{14, 14, 1024, 512, 1, 1},
{7, 7, 512, 512, 3, 3},
};
Result::print_header(std::cout, options) << std::endl;
int idx = 1;
for (auto const &layer : layers) {
for (auto N : batch_sizes) {
options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c});
Result result = profile_convolution(options);
result.print(std::cout, idx, options) << std::endl;
}
++idx;
}
}
else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
}
return 0;
}
| examples/16_ampere_tensorop_conv2dfprop/ampere_tensorop_conv2dfprop.cu/0 | {
"file_path": "examples/16_ampere_tensorop_conv2dfprop/ampere_tensorop_conv2dfprop.cu",
"repo_id": "examples",
"token_count": 10130
} | 2 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief GEMM Grouped Example.
This workload computes a batch of GEMM operations with distinct problem sizes. Pointers to matrices
in Global Memory are passed to the kernel in arrays (also held in Global Memory). Similarly,
leading dimensions and problem sizes are stored in arrays in GMEM.
This differs from "Batched Array" GEMM because the size of each GEMM problem in the Grouped GEMM
concept may be distinct.
This benchmark program initializes a workspace with random problem sizes for a given number of
groups. Command line options enable overriding M, N, and/or K dimensions with uniform values to
model problems more similar to the traditional batched GEMM.
Additionally, problem sizes are collected and binned so that the same workload can be computed as
a series of conventional batched GEMMs (setup for this path is not timed). This demonstrates the
performance enhancement achieved by implementing a specialized grouped GEMM kernel.
Examples:
# Runs a grouped GEMM with 100 random problem sizes
$ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100
# Runs a grouped GEMM with 100 random problem sizes (with GEMM-K dimension equal to 1024)
$ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100 --k=1024 --verbose=true
# Runs a grouped GEMM that is equivalent to a batched GEMM
$ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100 --m=2048 --n=1024 --k=1024 --verbose=true
# Execute Grouped GEMM and profile with NSight
$ nv-nsight-cu-cli ./examples/24_gemm_grouped/24_gemm_grouped --m=256 --n=256 --k=256 --verbose=true \
--iterations=1 --reference-check=false
*/
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <chrono>
#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <map>
#include <unordered_map>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm_grouped.h"
#include "cutlass/gemm/kernel/default_gemm_grouped.h"
#include "cutlass/gemm/device/gemm_grouped.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/device/gemm_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double initialization_time_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double initialization_time_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess
):
runtime_ms(runtime_ms), initialization_time_ms(initialization_time_ms), gflops(gflops),
status(status), error(error), passed(true) { }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Hash function for cutlass::gemm::GemmCoord
struct HashGemmCoord {
size_t operator()(cutlass::gemm::GemmCoord const &problem) const {
std::hash<int> hasher;
return (hasher(problem.m() * 3)) ^ (hasher(1 + problem.n() * 5)) ^ (hasher(2 + problem.k() * 7));
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
bool error;
bool reference_check;
bool profile_initialization;
bool sort_problems;
std::vector<cutlass::gemm::GemmCoord> problem_sizes;
// problem size bins
std::unordered_map<
cutlass::gemm::GemmCoord,
std::vector<int32_t>,
HashGemmCoord> problem_bins;
int alignment;
int problem_count;
int iterations;
int cuda_streams;
bool verbose;
float alpha;
float beta;
std::string benchmark_path;
std::string output_tag;
std::ofstream output_file;
using GroupScheduleMode = cutlass::gemm::kernel::GroupScheduleMode;
std::vector<GroupScheduleMode> scheduler_modes;
std::unordered_map<std::string, GroupScheduleMode>
str_to_scheduler_mode = {
{"kDeviceOnly", GroupScheduleMode::kDeviceOnly},
{"kHostPrecompute", GroupScheduleMode::kHostPrecompute}
};
struct GroupScheduleModeHash {
size_t operator()(GroupScheduleMode m) const {
return static_cast<size_t>(m);
}
};
std::unordered_map<GroupScheduleMode, std::string, GroupScheduleModeHash>
scheduler_mode_to_str = {
{GroupScheduleMode::kDeviceOnly, "kDeviceOnly"},
{GroupScheduleMode::kHostPrecompute, "kHostPrecompute"}
};
std::vector<GroupScheduleMode> all_scheduler_modes = {GroupScheduleMode::kDeviceOnly, GroupScheduleMode::kHostPrecompute};
//
// Methods
//
Options():
help(false),
error(false),
alignment(8),
reference_check(true),
profile_initialization(false),
sort_problems(false),
problem_count(15),
iterations(20),
cuda_streams(0),
verbose(false),
alpha(1),
beta(),
scheduler_modes({GroupScheduleMode::kDeviceOnly})
{ }
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
return;
}
cmd.get_cmd_line_argument("alignment", alignment, 8);
cmd.get_cmd_line_argument("groups", problem_count, 15);
cmd.get_cmd_line_argument("alpha", alpha, 1.0f);
cmd.get_cmd_line_argument("beta", beta, 0.0f);
cmd.get_cmd_line_argument("iterations", iterations, 20);
cmd.get_cmd_line_argument("streams", cuda_streams, 0);
cmd.get_cmd_line_argument("verbose", verbose, false);
cmd.get_cmd_line_argument("reference-check", reference_check, true);
cmd.get_cmd_line_argument("profile-initialization", profile_initialization, false);
cmd.get_cmd_line_argument("sort-problems", sort_problems, false);
cmd.get_cmd_line_argument("benchmark", benchmark_path);
std::vector<std::string> scheduler_mode_strs;
cmd.get_cmd_line_arguments("scheduler-modes", scheduler_mode_strs);
if (!scheduler_mode_strs.empty()) {
scheduler_modes.clear();
if (scheduler_mode_strs.size() == 1 && scheduler_mode_strs[0] == "all") {
scheduler_modes = all_scheduler_modes;
} else {
for (std::string precomp_str : scheduler_mode_strs) {
auto it = str_to_scheduler_mode.find(precomp_str);
if (it != str_to_scheduler_mode.end()) {
scheduler_modes.push_back(it->second);
} else if (precomp_str == "all") {
std::cerr << "Flag --scheduler-modes=all must not contain other scheduler modes in list." << std::endl;
error = true;
return;
} else {
std::cerr << "Unrecognized scheduler mode '" << precomp_str << "'" << std::endl;
error = true;
return;
}
}
}
}
std::string output_path;
cmd.get_cmd_line_argument("tag", output_tag);
cmd.get_cmd_line_argument("output_file", output_path);
if (!output_path.empty()) {
std::ios_base::openmode open_mode = std::ios_base::out;
std::ifstream input_file(output_path.c_str());
if (input_file.good()) {
open_mode = std::ios_base::app;
input_file.close();
}
output_file.open(output_path.c_str(), open_mode);
if (output_file.good() && open_mode != std::ios_base::app) {
output_file << "Tag,Provider,Kind,Groups,Runtime,GFLOPs\n";
}
}
// Decide how to initialize the problems
if (!benchmark_path.empty()) {
if (!benchmark_problems()) {
error = true;
problem_sizes.clear();
return;
}
}
else {
randomize_problems(cmd);
}
// Post-process the problem sizes
bin_problems();
}
void randomize_problems(cutlass::CommandLine &cmd) {
//
// For now, randomly choose the problem sizes.
//
int cmd_line_m = -1;
int cmd_line_n = -1;
int cmd_line_k = -1;
cmd.get_cmd_line_argument("m", cmd_line_m);
cmd.get_cmd_line_argument("n", cmd_line_n);
cmd.get_cmd_line_argument("k", cmd_line_k);
problem_sizes.reserve(problem_count);
for (int i = 0; i < problem_count; ++i) {
int m = cmd_line_m;
int n = cmd_line_n;
int k = cmd_line_k;
if (m < 1) {
m = alignment * ((rand() % 256) + 1);
}
if (n < 1) {
n = alignment * ((rand() % 256) + 1);
}
if (k < 1) {
k = alignment * ((rand() % 256) + 1);
}
cutlass::gemm::GemmCoord problem(m, n, k);
problem_sizes.push_back(problem);
}
}
/// Load a benchmark
bool benchmark_problems() {
std::ifstream file(benchmark_path);
if (!file.good()) {
return false;
}
while (file.good()) {
int idx = -1;
std::string extent_str;
file >> idx >> extent_str;
if (idx < 0 || extent_str.empty()) {
break;
}
cutlass::gemm::GemmCoord extent;
std::vector<std::string> tokens;
cutlass::CommandLine::tokenize(tokens, extent_str, 'x');
for (int i = 0; i < int(tokens.size()); ++i) {
int x = std::atoi(tokens.at(i).c_str());
// round up
if (x % alignment) {
x += (alignment - (x % alignment));
}
extent.at(i) = x;
}
if (extent.product()) {
problem_sizes.push_back(extent);
}
}
return true;
}
/// Post processes the problems
void bin_problems() {
problem_bins.clear();
problem_count = int(problem_sizes.size());
//
// Bin the problem sizes by identical extents using a hash map. This is *NOT* necessary
// to run the CUTLASS kernel, but it enables the execution of cublas's batched GEMM.
//
for (int i = 0; i < int(problem_sizes.size()); ++i) {
auto it = problem_bins.find(problem_sizes.at(i));
if (it == problem_bins.end()) {
problem_bins.insert({problem_sizes.at(i), std::vector<int32_t>({i}) });
}
else {
it->second.push_back(i);
}
}
}
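/// For example (hypothetical sizes): problem_sizes = {256x512x128, 64x64x64, 256x512x128}
/// yields two bins, {256x512x128 -> [0, 2]} and {64x64x64 -> [1]}, so the batched path
/// below launches one batched GEMM of batch count 2 and one of batch count 1.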
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "24_gemm_grouped\n\n"
<< " This example profiles the performance of a 'grouped' GEMM kernel. This is similar to batched GEMM\n"
<< " in that multiple, independent GEMMs are computed by one grid launch. It differs in that each\n"
<< " 'group' may compute a unique problem size. Problem sizes and pointers to matrices are both stored\n"
<< " in device Global Memory and loaded by the kernel.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --benchmark=<str> Executes a benchmark problem size.\n"
<< " --output_file=<str> Path to a CSV file to output results. If it exists already, results are appended.\n"
<< " --tag=<str> String tag to prepend to the CSV file.\n"
<< " --groups=<int> Number of individual GEMM problems (default: --groups=15)\n"
<< " --m=<int> Sets the M dimension for all groups. Otherwise, it is selected randomly\n"
<< " --n=<int> Sets the N dimension for all groups. Otherwise, it is selected randomly\n"
<< " --k=<int> Sets the K dimension for all groups. Otherwise, it is selected randomly\n"
<< " --alpha=<f32> Epilogue scalar alpha (real part)\n"
<< " --beta=<f32> Epilogue scalar beta (real part)\n"
<< " --scheduler-modes=<str> List of scheduler modes to be profile for grouped GEMM scheduler (default: --scheduler_modes=kDeviceOnly)\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --reference-check=<bool> If true, performs reference check.\n"
<< " --verbose=<bool> If true, prints problem sizes and batching structure.\n"
<< " --profile-initialization=<bool> If true, profiles the device-level kernel's initialization.\n"
<< " --sort-problems=<bool> If true, sorts problem sizes in descending order of GEMM-K dimension.\n";
out << "\n\nExamples:\n\n"
<< "# Runs a grouped GEMM with 100 random problem sizes\n"
<< "$ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100\n\n"
<< "# Runs a grouped GEMM with 100 random problem sizes (with GEMM-K dimension equal to 1024)\n"
<< "$ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100 --k=1024 --verbose=true\n\n"
<< "# Runs a grouped GEMM that is equivalent to a batched GEMM\n"
<< "$ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100 --m=2048 --n=1024 --k=1024 --verbose=true\n\n"
<< "# Runs a grouped GEMM with each different scheduler mode\n"
<< "$ ./examples/24_gemm_grouped/24_gemm_grouped --scheduler-modes=all\n\n"
<< "# Runs a grouped GEMM with each different scheduler mode and profiles host-side initialization time\n"
<< "$ ./examples/24_gemm_grouped/24_gemm_grouped --scheduler-modes=all --profile-initialization=true\n\n"
<< "# Runs a grouped GEMM problem given an externally supplied benchmark file. This is a text file in which\n"
<< "# Each line contains a unique group index and an MxNxK triple indicating problemsize.\n"
<< "#\n"
<< "# For example, assume the following are the contents of 'problems.txt'\n"
<< "#\n"
<< "# 0 1024x256x520\n"
<< "# 1 520x264x1024\n"
<< "# 2 96x48x1024\n"
<< "#\n"
<< "$ ./examples/24_gemm_grouped/24_gemm_grouped --benchmark=problems.txt\n\n"
<< "# Execute Grouped GEMM and profile with NSight\n"
<< "$ nv-nsight-cu-cli ./examples/24_gemm_grouped/24_gemm_grouped --m=256 --n=256 --k=256 --verbose=true --iterations=1 --reference-check=false\n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = int64_t();
for (auto const & problem : problem_sizes) {
fmas += problem.product();
}
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
class BaseTestbed {
public:
//
// Type definitions
//
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using EpilogueOutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp;
using ElementCompute = typename EpilogueOutputOp::ElementCompute;
using LayoutA = typename Gemm::LayoutA;
using LayoutB = typename Gemm::LayoutB;
using LayoutC = typename Gemm::LayoutC;
using MatrixCoord = typename LayoutC::TensorCoord;
//
// Data members
//
Options & options;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint32_t seed;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device;
std::vector<int64_t> offset_A;
std::vector<int64_t> offset_B;
std::vector<int64_t> offset_C;
std::vector<int64_t> offset_D;
std::vector<int64_t> lda_host;
std::vector<int64_t> ldb_host;
std::vector<int64_t> ldc_host;
std::vector<int64_t> ldd_host;
cutlass::DeviceAllocation<int64_t> lda;
cutlass::DeviceAllocation<int64_t> ldb;
cutlass::DeviceAllocation<int64_t> ldc;
cutlass::DeviceAllocation<int64_t> ldd;
cutlass::DeviceAllocation<ElementA> block_A;
cutlass::DeviceAllocation<ElementB> block_B;
cutlass::DeviceAllocation<ElementC> block_C;
cutlass::DeviceAllocation<ElementC> block_D;
cutlass::DeviceAllocation<ElementA *> ptr_A;
cutlass::DeviceAllocation<ElementB *> ptr_B;
cutlass::DeviceAllocation<ElementC *> ptr_C;
cutlass::DeviceAllocation<ElementC *> ptr_D;
BaseTestbed(
Options &options_,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3080
):
options(options_), init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
int problem_count() const {
return options.problem_count;
}
/// Helper to initialize a tensor view
template <typename Element>
void initialize_tensor(
Element *ptr,
size_t capacity,
cutlass::Distribution::Kind dist_kind,
uint32_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
Element scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) {
scope_max = 5;
scope_min = -5;
}
else {
scope_max = 8;
scope_min = -8;
}
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::device::BlockFillRandomUniform(
ptr, capacity, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::device::BlockFillRandomGaussian(
ptr, capacity, seed, Element(), Element(0.5f));
}
else if (dist_kind == cutlass::Distribution::Sequential) {
// Fill with increasing elements
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(1), Element());
}
else {
// Fill with all 1s
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(), Element(1));
}
}
/// Allocates device-side data
void allocate() {
int64_t total_elements_A = 0;
int64_t total_elements_B = 0;
int64_t total_elements_C = 0;
int64_t total_elements_D = 0;
lda_host.resize(problem_count());
ldb_host.resize(problem_count());
ldc_host.resize(problem_count());
ldd_host.resize(problem_count());
for (int32_t i = 0; i < problem_count(); ++i) {
auto problem = options.problem_sizes.at(i);
lda_host.at(i) = LayoutA::packed({problem.m(), problem.k()}).stride(0);
ldb_host.at(i) = LayoutB::packed({problem.k(), problem.n()}).stride(0);
ldc_host.at(i) = LayoutC::packed({problem.m(), problem.n()}).stride(0);
ldd_host.at(i) = LayoutC::packed({problem.m(), problem.n()}).stride(0);
offset_A.push_back(total_elements_A);
offset_B.push_back(total_elements_B);
offset_C.push_back(total_elements_C);
offset_D.push_back(total_elements_D);
int64_t elements_A = problem.m() * problem.k();
int64_t elements_B = problem.k() * problem.n();
int64_t elements_C = problem.m() * problem.n();
int64_t elements_D = problem.m() * problem.n();
total_elements_A += elements_A;
total_elements_B += elements_B;
total_elements_C += elements_C;
total_elements_D += elements_D;
}
lda.reset(problem_count());
ldb.reset(problem_count());
ldc.reset(problem_count());
ldd.reset(problem_count());
block_A.reset(total_elements_A);
block_B.reset(total_elements_B);
block_C.reset(total_elements_C);
block_D.reset(total_elements_D);
}
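/// Sketch of the packing performed above with two hypothetical problems (MxNxK)
/// 128x128x64 and 64x64x32: block_A holds 128*64 + 64*32 = 10240 elements,
/// offset_A = {0, 8192}, and initialize() then sets ptr_A[i] = block_A.get() + offset_A[i].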
/// Initializes device-side data
void initialize() {
problem_sizes_device.reset(problem_count());
problem_sizes_device.copy_from_host(options.problem_sizes.data());
lda.copy_from_host(lda_host.data());
ldb.copy_from_host(ldb_host.data());
ldc.copy_from_host(ldc_host.data());
ldd.copy_from_host(ldd_host.data());
//
// Assign pointers
//
std::vector<ElementA *> ptr_A_host(problem_count());
std::vector<ElementB *> ptr_B_host(problem_count());
std::vector<ElementC *> ptr_C_host(problem_count());
std::vector<ElementC *> ptr_D_host(problem_count());
for (int32_t i = 0; i < problem_count(); ++i) {
ptr_A_host.at(i) = block_A.get() + offset_A.at(i);
ptr_B_host.at(i) = block_B.get() + offset_B.at(i);
ptr_C_host.at(i) = block_C.get() + offset_C.at(i);
ptr_D_host.at(i) = block_D.get() + offset_D.at(i);
}
ptr_A.reset(problem_count());
ptr_A.copy_from_host(ptr_A_host.data());
ptr_B.reset(problem_count());
ptr_B.copy_from_host(ptr_B_host.data());
ptr_C.reset(problem_count());
ptr_C.copy_from_host(ptr_C_host.data());
ptr_D.reset(problem_count());
ptr_D.copy_from_host(ptr_D_host.data());
//
// Initialize the problems of the workspace
//
initialize_tensor(block_A.get(), block_A.size(), init_A, seed * 2021);
initialize_tensor(block_B.get(), block_B.size(), init_B, seed * 2022);
initialize_tensor(block_C.get(), block_C.size(), init_C, seed * 2023);
cutlass::reference::device::BlockFillSequential(
block_D.get(), block_D.size(), ElementC(), ElementC());
}
/// Verifies the result of each GEMM in the group against a reference GEMM
bool verify() {
bool passed = true;
for (int32_t i = 0; i < problem_count(); ++i) {
cutlass::gemm::GemmCoord problem = options.problem_sizes.at(i);
LayoutA layout_A(lda_host.at(i));
LayoutB layout_B(ldb_host.at(i));
LayoutC layout_C(ldc_host.at(i));
LayoutC layout_D(ldd_host.at(i));
MatrixCoord extent_A{problem.m(), problem.k()};
MatrixCoord extent_B{problem.k(), problem.n()};
MatrixCoord extent_C{problem.m(), problem.n()};
cutlass::TensorView<ElementA, LayoutA> view_A(block_A.get() + offset_A.at(i), layout_A, extent_A);
cutlass::TensorView<ElementB, LayoutB> view_B(block_B.get() + offset_B.at(i), layout_B, extent_B);
cutlass::TensorView<ElementC, LayoutC> view_C(block_C.get() + offset_C.at(i), layout_C, extent_C);
cutlass::DeviceAllocation<ElementC> block_Ref(layout_D.capacity(extent_C));
cutlass::TensorView<ElementC, LayoutC> view_Ref_device(block_Ref.get(), layout_D, extent_C);
// Reference GEMM
cutlass::reference::device::GemmComplex<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute, ElementAccumulator
>(
problem,
options.alpha,
view_A,
Gemm::kTransformA,
view_B,
Gemm::kTransformB,
options.beta,
view_C,
view_Ref_device,
ElementAccumulator(0)
);
// Copy to host memory
std::vector<ElementC> matrix_D(layout_D.capacity(extent_C));
std::vector<ElementC> matrix_Ref(layout_D.capacity(extent_C));
cutlass::device_memory::copy_to_host(matrix_D.data(), block_D.get() + offset_D.at(i), matrix_D.size());
cutlass::device_memory::copy_to_host(matrix_Ref.data(), block_Ref.get(), matrix_D.size());
cutlass::TensorView<ElementC, LayoutC> view_D( matrix_D.data(), layout_D, extent_C);
cutlass::TensorView<ElementC, LayoutC> view_Ref(matrix_Ref.data(), layout_D, extent_C);
// Reference check
passed = cutlass::reference::host::TensorEquals(view_D, view_Ref);
if (!passed) {
std::cerr << "\n***\nError - problem " << i << " failed the QA check\n***\n" << std::endl;
return passed;
}
}
return passed;
}
};
template <typename Gemm>
class TestbedBatched : BaseTestbed<Gemm> {
public:
TestbedBatched(
Options &options_,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3080
): BaseTestbed<Gemm>(options_, init_A_, init_B_, init_C_, seed_) {}
void print_problem_sizes() {
std::cout << std::endl;
size_t bin_idx = 0;
size_t problem_count_check = 0;
std::cout << "Conventionally executed as " << this->options.problem_bins.size() << " batched GEMMs:\n";
for (auto const & bin : this->options.problem_bins) {
std::cout << " [" << bin_idx << "]: "
<< bin.first.m() << "-by-" << bin.first.n() << "-by-" << bin.first.k()
<< ", batch count: " << bin.second.size() << "\n";
++bin_idx;
problem_count_check += bin.second.size();
}
if (problem_count_check != size_t(this->problem_count())) {
std::cout << "\n***\nERROR in BINNING LOGIC!\n***\n" << std::endl;
}
std::cout << std::endl;
}
/// Executes a batched kernel and measures runtime
Result profile() {
std::cout << "Batched GEMM:\n"
<< "====================================================" << std::endl;
Result result;
result.passed = false;
// Initialize the problem
this->allocate();
this->initialize();
if (this->options.verbose) {
print_problem_sizes();
}
//
// Prepare batched GEMM environment
//
int32_t effective_streams = (this->options.cuda_streams ? this->options.cuda_streams : 1);
// Array of leading dimensions used by batched GEMM calls
std::vector<cutlass::gemm::GemmCoord> bin_problem_sizes;
std::vector<int32_t> bin_count;
std::vector<int32_t> bin_ldm_A;
std::vector<int32_t> bin_ldm_B;
std::vector<int32_t> bin_ldm_C;
std::vector<int32_t> bin_start;
std::vector<void const *> ptr_A_batched_host;
std::vector<void const *> ptr_B_batched_host;
std::vector<void *> ptr_C_batched_host;
for (auto const & bin : this->options.problem_bins) {
int first_idx = bin.second.front();
bin_problem_sizes.push_back(this->options.problem_sizes.at(first_idx));
bin_count.push_back(int32_t(bin.second.size()));
bin_ldm_A.push_back(static_cast<int32_t>(this->lda_host.at(first_idx)));
bin_ldm_B.push_back(static_cast<int32_t>(this->ldb_host.at(first_idx)));
bin_ldm_C.push_back(static_cast<int32_t>(this->ldc_host.at(first_idx)));
if (ptr_A_batched_host.size() % 2) {
ptr_A_batched_host.push_back(nullptr);
ptr_B_batched_host.push_back(nullptr);
ptr_C_batched_host.push_back(nullptr);
}
bin_start.push_back(int32_t(ptr_A_batched_host.size()));
for (int idx : bin.second) {
if (bin_problem_sizes.back() != this->options.problem_sizes.at(idx)) {
std::cerr << "Error - failed to group problems.\n";
return result;
}
if (bin_ldm_A.back() != this->lda_host.at(idx)) {
std::cerr << "Error - failed to group problems.\n";
return result;
}
if (bin_ldm_B.back() != this->ldb_host.at(idx)) {
std::cerr << "Error - failed to group problems.\n";
return result;
}
if (bin_ldm_C.back() != this->ldc_host.at(idx)) {
std::cerr << "Error - failed to group problems.\n";
return result;
}
ptr_A_batched_host.push_back(this->block_A.get() + this->offset_A.at(idx));
ptr_B_batched_host.push_back(this->block_B.get() + this->offset_B.at(idx));
ptr_C_batched_host.push_back(this->block_D.get() + this->offset_C.at(idx));
}
}
// Array of GMEM pointers used by batched array GEMM calls
cutlass::DeviceAllocation<void const *> ptr_A_batched;
cutlass::DeviceAllocation<void const *> ptr_B_batched;
cutlass::DeviceAllocation<void *> ptr_C_batched;
ptr_A_batched.reset(ptr_A_batched_host.size());
ptr_B_batched.reset(ptr_A_batched_host.size());
ptr_C_batched.reset(ptr_A_batched_host.size());
ptr_A_batched.copy_from_host(ptr_A_batched_host.data());
ptr_B_batched.copy_from_host(ptr_B_batched_host.data());
ptr_C_batched.copy_from_host(ptr_C_batched_host.data());
//
// Create CUDA streams to maximize concurrency of batched-array GEMM kernels
//
std::vector<cudaStream_t> cuda_streams;
//
// Warmup run
//
if (this->options.cuda_streams) {
for (int i = 0; i < this->options.cuda_streams; ++i) {
cudaStream_t stream;
result.error = cudaStreamCreate(&stream);
if (result.error != cudaSuccess) {
std::cerr << "Failed to create CUDA stream." << std::endl;
return result;
}
cuda_streams.push_back(stream);
}
}
else {
cuda_streams.push_back(nullptr);
}
// Use 'D' for the in/out workspace
this->block_D.copy_from_device(this->block_C.get());
for (int bin_idx = 0; bin_idx < int32_t(bin_problem_sizes.size()); ++bin_idx) {
cutlass::gemm::GemmCoord const & problem = bin_problem_sizes[bin_idx];
int32_t batch_count = bin_count[bin_idx];
int32_t bin_start_idx = bin_start[bin_idx];
int32_t lda = bin_ldm_A[bin_idx];
int32_t ldb = bin_ldm_B[bin_idx];
int32_t ldc = bin_ldm_C[bin_idx];
      void const ** ptr_A_array = ptr_A_batched.get() + bin_start_idx;
      void const ** ptr_B_array = ptr_B_batched.get() + bin_start_idx;
      void ** ptr_C_array = ptr_C_batched.get() + bin_start_idx;
//
// Initialize the CUTLASS GEMM operator
//
// Configure the GEMM arguments
typename Gemm::EpilogueOutputOp::Params epilogue_op(this->options.alpha, this->options.beta);
typename Gemm::Arguments arguments{
cutlass::gemm::GemmUniversalMode::kArray,
problem,
batch_count,
epilogue_op,
(void const *)ptr_A_array,
(void const *)ptr_B_array,
(void const *)ptr_C_array,
(void *)ptr_C_array,
int64_t(),
int64_t(),
int64_t(),
int64_t(),
int64_t(lda),
int64_t(ldb),
int64_t(ldc),
int64_t(ldc)
};
Gemm gemm_op;
cutlass::Status status = gemm_op.initialize(arguments);
if (status != cutlass::Status::kSuccess) {
std::cerr << "CUTLASS error on line " << __LINE__ << std::endl;
return result;
}
status = gemm_op();
if (status != cutlass::Status::kSuccess) {
std::cerr << "CUTLASS error on line " << __LINE__ << std::endl;
return result;
}
}
//
// Wait for completion
//
result.error = cudaDeviceSynchronize();
if (result.error != cudaSuccess) {
std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error);
return result;
}
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
        return result;
}
}
//
// Wait for completion
//
result.error = cudaDeviceSynchronize();
if (result.error != cudaSuccess) {
std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error);
return result;
}
// Record an event at the start of a series of GEMM operations
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Run profiling loop
//
int last_stream_idx = 0;
for (int iter = 0; iter < this->options.iterations; ++iter) {
for (int bin_idx = 0; bin_idx < int32_t(bin_problem_sizes.size()); ++bin_idx) {
cutlass::gemm::GemmCoord const & problem = bin_problem_sizes[bin_idx];
int32_t batch_count = bin_count[bin_idx];
int32_t bin_start_idx = bin_start[bin_idx];
int32_t lda = bin_ldm_A[bin_idx];
int32_t ldb = bin_ldm_B[bin_idx];
int32_t ldc = bin_ldm_C[bin_idx];
        void const ** ptr_A_array = ptr_A_batched.get() + bin_start_idx;
        void const ** ptr_B_array = ptr_B_batched.get() + bin_start_idx;
        void ** ptr_C_array = ptr_C_batched.get() + bin_start_idx;
last_stream_idx = (bin_idx % effective_streams);
//
// Initialize the CUTLASS GEMM operator
//
// Configure the GEMM arguments
typename Gemm::EpilogueOutputOp::Params epilogue_op(this->options.alpha, this->options.beta);
typename Gemm::Arguments arguments{
cutlass::gemm::GemmUniversalMode::kArray,
problem,
batch_count,
epilogue_op,
(void const *)ptr_A_array,
(void const *)ptr_B_array,
(void const *)ptr_C_array,
(void *)ptr_C_array,
int64_t(),
int64_t(),
int64_t(),
int64_t(),
int64_t(lda),
int64_t(ldb),
int64_t(ldc),
int64_t(ldc)
};
Gemm gemm_op;
cutlass::Status status = gemm_op.initialize(arguments);
if (status != cutlass::Status::kSuccess) {
std::cerr << "CUTLASS error on line " << __LINE__ << std::endl;
return result;
}
status = gemm_op(cuda_streams[last_stream_idx]);
if (status != cutlass::Status::kSuccess) {
std::cerr << "CUTLASS error on line " << __LINE__ << std::endl;
return result;
}
}
}
//
// Stop profiling loop
//
// Record an event when the GEMM operations have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Wait for work to be completed
//
result.error = cudaDeviceSynchronize();
if (result.error != cudaSuccess) {
std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error);
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(this->options.iterations);
result.gflops = this->options.gflops(result.runtime_ms / 1000.0);
//
// Cleanup
//
for (auto event : events) {
(void)cudaEventDestroy(event);
}
for (auto stream : cuda_streams) {
if (stream) {
(void)cudaStreamDestroy(stream);
}
}
std::cout << " " << this->options.problem_bins.size() << " batched GEMMs launched" << std::endl;
std::cout << std::endl;
std::cout << " " << "Batched Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " " << "Batched GFLOPs: " << result.gflops << std::endl;
std::string provider = "CUTLASS";
if (this->options.output_file.good()) {
this->options.output_file << this->options.output_tag << "," << provider << ",batched,"
<< this->options.problem_count << "," << result.runtime_ms << "," << result.gflops << std::endl;
}
result.passed = true;
return result;
}
};
template <typename Gemm_, cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode_>
class TestbedGrouped : BaseTestbed<Gemm_> {
public:
TestbedGrouped(
Options &options_,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3080
): BaseTestbed<Gemm_>(options_, init_A_, init_B_, init_C_, seed_) {}
// Redefine GEMM with different GroupScheduleMode_
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped<
typename Gemm_::ElementA,
typename Gemm_::LayoutA,
Gemm_::kTransformA,
Gemm_::kAlignmentA,
typename Gemm_::ElementB,
typename Gemm_::LayoutB,
Gemm_::kTransformB,
Gemm_::kAlignmentB,
typename Gemm_::ElementC,
typename Gemm_::LayoutC,
typename Gemm_::ElementAccumulator,
typename Gemm_::OperatorClass,
typename Gemm_::ArchTag,
typename Gemm_::ThreadblockShape,
typename Gemm_::WarpShape,
typename Gemm_::InstructionShape,
typename Gemm_::EpilogueOutputOp,
typename Gemm_::ThreadblockSwizzle,
Gemm_::kStages,
GroupScheduleMode_>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmGrouped<GemmKernel>;
/// Verbose printing of problem sizes
void print_problem_sizes() {
std::cout << std::endl;
// Print groups
std::cout << this->problem_count() << " groups:\n";
int32_t idx = 0;
int64_t total_tiles = 0;
for (auto const & problem : this->options.problem_sizes) {
int tiles = Gemm::problem_tile_count(problem);
total_tiles += tiles;
std::cout << " [" << idx << "]: "
<< problem.m() << "-by-" << problem.n() << "-by-" << problem.k()
<< " (" << tiles << " threadblock tiles)" << "\n";
++idx;
}
std::cout << std::endl;
}
/// Sort problems in descending order of problem-K dimension
void sort_problems() {
Gemm::sort_problems(this->options.problem_count,
this->options.problem_sizes.data(),
this->lda_host.data(),
this->ldb_host.data(),
this->ldc_host.data(),
this->ldd_host.data(),
this->offset_A.data(),
this->offset_B.data(),
this->offset_C.data(),
this->offset_D.data());
}
/// Executes a grouped kernel and measures runtime
Result profile() {
std::string sched_mode = this->options.scheduler_mode_to_str.find(GroupScheduleMode_)->second;
std::cout << std::endl;
std::cout << "Grouped GEMM (CUTLASS) with mode " << sched_mode << ":\n"
<< "====================================================" << std::endl;
Result result;
int threadblock_count = Gemm::sufficient(this->options.problem_sizes.data(), this->options.problem_count);
// Early exit
if (!threadblock_count) {
std::cout << "Active CUDA device lacks hardware resources to run CUTLASS Grouped GEMM kernel." << std::endl;
return result;
}
result.passed = false;
// Initialize the problem
this->allocate();
if (this->options.sort_problems) {
sort_problems();
}
this->initialize();
if (this->options.verbose) {
print_problem_sizes();
}
// Configure the GEMM arguments
typename Gemm::EpilogueOutputOp::Params epilogue_op(this->options.alpha, this->options.beta);
// Configure GEMM arguments
typename Gemm::Arguments args(
this->problem_sizes_device.get(),
this->problem_count(),
threadblock_count,
epilogue_op,
this->ptr_A.get(),
this->ptr_B.get(),
this->ptr_C.get(),
this->ptr_D.get(),
this->lda.get(),
this->ldb.get(),
this->ldc.get(),
this->ldd.get(),
this->options.problem_sizes.data()
);
// Initialize the GEMM object
Gemm gemm;
size_t workspace_size = gemm.get_workspace_size(args);
cutlass::DeviceAllocation<uint8_t> workspace(workspace_size);
result.status = gemm.initialize(args, workspace.get());
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to initialize CUTLASS Grouped GEMM kernel." << std::endl;
return result;
}
// Run the grouped GEMM object
result.status = gemm.run();
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to run CUTLASS Grouped GEMM kernel." << std::endl;
return result;
}
// Wait for completion
result.error = cudaDeviceSynchronize();
if (result.error != cudaSuccess) {
std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error);
return result;
}
//
// Verify correctness
//
result.passed = true;
if (this->options.reference_check) {
result.passed = this->verify();
}
//
// Warm-up run of the grouped GEMM object
//
result.status = gemm.run();
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to run CUTLASS Grouped GEMM kernel." << std::endl;
return result;
}
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
        return result;
}
}
// Record an event at the start of a series of GEMM operations
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Run profiling loop
//
for (int iter = 0; iter < this->options.iterations; ++iter) {
gemm();
}
//
// Stop profiling loop
//
// Record an event when the GEMM operations have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(this->options.iterations);
result.gflops = this->options.gflops(result.runtime_ms / 1000.0);
//
// Cleanup
//
for (auto event : events) {
(void)cudaEventDestroy(event);
}
// Optionally profile initialization
if (this->options.profile_initialization) {
// Warm up
gemm.initialize(args, workspace.get());
auto start_time = std::chrono::high_resolution_clock::now();
for (int32_t i = 0; i < this->options.iterations; ++i) {
gemm.initialize(args, workspace.get());
}
auto end_time = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> duration = end_time - start_time;
duration /= double(this->options.iterations);
result.initialization_time_ms = duration.count();
}
int64_t total_tiles = Gemm::group_tile_count(args);
std::cout << " " << total_tiles << " total threadblock tiles." << std::endl;
std::cout << std::endl;
std::cout << " " << "Grouped Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " " << "Grouped GFLOPs: " << result.gflops << std::endl;
if (this->options.profile_initialization) {
std::cout << " " << "Init Runtime: " << result.initialization_time_ms << " ms" << std::endl;
}
if (this->options.output_file.good()) {
this->options.output_file << this->options.output_tag << ",CUTLASS,grouped-" << sched_mode << ","
<< this->options.problem_count << "," << result.runtime_ms << "," << result.gflops << std::endl;
}
std::cout << "\nPassed\n";
return result;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (__CUDACC_VER_MAJOR__ < 11 || props.major < 8) {
//
// This example requires an NVIDIA Ampere-architecture GPU.
//
std::cout
<< "CUTLASS's Grouped GEMM example requires a GPU of NVIDIA's Ampere Architecture or "
<< "later (compute capability 80 or greater).\n";
return 0;
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.error) {
std::cerr << "Aborting execution." << std::endl;
return -1;
}
//
// Define the Grouped and Batched GEMM types
//
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
// Gemm operator cutlass_tensorop_f16_s16816gemm_f16_128x128_32x4_nt_align8
using GemmBatched = cutlass::gemm::device::GemmUniversal<
ElementA, LayoutA,
ElementB, LayoutB,
ElementOutput, LayoutC,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
4
>;
// Define a grouped GEMM kernel with all template parameters set except
// for scheduling mode. This will be used as the template for all scheduling
// modes executed.
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped<
ElementA,
LayoutA,
cutlass::ComplexTransform::kNone,
8,
ElementB,
LayoutB,
cutlass::ComplexTransform::kNone,
8,
ElementOutput, LayoutC,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator, ElementAccumulator>,
// NOTE: Threadblock swizzling is currently not supported by CUTLASS's grouped kernels.
// This parameter is passed in at present to match the APIs of other kernels. The parameter
// is unused within the kernel.
cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,
4>::GemmKernel;
using GemmGrouped = cutlass::gemm::device::GemmGrouped<GemmKernel>;
//
// Profile it
//
TestbedBatched<GemmBatched> testbed_batched(options);
Result result = testbed_batched.profile();
if (result.error) {
return 1;
}
using GroupScheduleMode = cutlass::gemm::kernel::GroupScheduleMode;
for (GroupScheduleMode mode : options.scheduler_modes) {
Result result;
switch (mode) {
case GroupScheduleMode::kDeviceOnly:
{
TestbedGrouped<GemmGrouped, GroupScheduleMode::kDeviceOnly> runner(options);
result = runner.profile();
break;
}
case GroupScheduleMode::kHostPrecompute:
{
TestbedGrouped<GemmGrouped, GroupScheduleMode::kHostPrecompute> runner(options);
result = runner.profile();
break;
}
}
if (result.error != cudaSuccess) {
return 1;
}
// Override verbose flag to avoid printing duplicate information for each scheduling mode
options.verbose = false;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/24_gemm_grouped/gemm_grouped.cu/0 | {
"file_path": "examples/24_gemm_grouped/gemm_grouped.cu",
"repo_id": "examples",
"token_count": 20904
} | 3 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <float.h>
#include <stdio.h>
#include <cmath>
////////////////////////////////////////////////////////////////////////////////
// Debugging functions
////////////////////////////////////////////////////////////////////////////////
// NaN & Inf detection
#define NANCHECK(frag) \
{ \
for (size_t _i = 0; _i < frag.size(); ++_i) { \
assert(std::isfinite(float(frag[_i]))); \
assert(!std::isnan(float(frag[_i]))); \
} \
}
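// Illustrative usage sketch (added for clarity; the fragment type below is an assumption):
// NANCHECK is intended to be invoked from device code on a register fragment such as a
// cutlass::Array, to assert that every element is finite while debugging, e.g.
//
//   cutlass::Array<float, 8> frag;
//   // ... fill frag with partial results ...
//   NANCHECK(frag);   // traps on the first NaN or Inf element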
// Print on the first thread of the first block
#if 1
#define PRINT_WARP_ID 0
#define PRINT_LANE_ID 0
#define PRINT_B0_T0(msg, ...) \
if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && \
threadIdx.x == PRINT_LANE_ID && threadIdx.y == PRINT_WARP_ID && \
threadIdx.z == 0) { \
printf(msg "\n", ##__VA_ARGS__); \
}
#define PRINT_T0(msg, ...) \
if (threadIdx.x == PRINT_LANE_ID && threadIdx.y == PRINT_WARP_ID && \
threadIdx.z == 0) { \
printf(msg "\n", ##__VA_ARGS__); \
}
#define PRINT_TX_LX(msg, ...) \
for (int bx = 0; bx < gridDim.x; ++bx) { \
for (int by = 0; by < gridDim.y; ++by) { \
for (int bz = 0; bz < gridDim.z; ++bz) { \
for (int tx = 0; tx < blockDim.x; ++tx) { \
for (int ty = 0; ty < blockDim.y; ++ty) { \
for (int tz = 0; tz < blockDim.z; ++tz) { \
__syncthreads(); \
if (blockIdx.x == bx && blockIdx.y == by && blockIdx.z == bz && \
threadIdx.x == tx && threadIdx.y == ty && \
threadIdx.z == tz) { \
printf( \
"[%d,%d,%d][%d,%d,%d]" msg "\n", \
bx, \
by, \
bz, \
tx, \
ty, \
tz, \
##__VA_ARGS__); \
} \
} \
} \
} \
} \
} \
}
#else
#define PRINT_B0_T0
#define PRINT_TX_LX
#endif
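// Illustrative usage sketch (an assumption, not part of the original header): the macros take a
// printf-style format string and are typically called from inside a kernel to limit output to a
// single thread, e.g.
//
//   PRINT_B0_T0("processing tile m=%d n=%d", tile_m, tile_n); // thread (0,0,0) of block (0,0,0)
//   PRINT_T0("partial sum: %f", float(partial_sum));          // thread (0,0,0) of every block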
struct __string_view {
char const* data;
std::size_t size;
};
#if __cplusplus >= 201402L
template <class T>
constexpr __string_view __get_type_name() {
char const* p = __PRETTY_FUNCTION__;
while (*p++ != '=')
;
for (; *p == ' '; ++p)
;
char const* p2 = p;
int count = 1;
for (;; ++p2) {
switch (*p2) {
case '[':
++count;
break;
case ']':
--count;
if (!count)
return {p, std::size_t(p2 - p)};
}
}
return {};
}
#else
template <class T>
constexpr __string_view __get_type_name() {
return {"unsupported", 11};
}
#endif
// Print a given array
#define PRINT_ACCUM8_T0_L0_START(name, accum, start) \
PRINT_B0_T0( \
"%s[%d:%d] - {%f, %f, %f, %f, %f, %f, %f, %f}", \
name, \
int(start), \
int(start + 8), \
float(accum[start + 0]), \
float(accum[start + 1]), \
float(accum[start + 2]), \
float(accum[start + 3]), \
float(accum[start + 4]), \
float(accum[start + 5]), \
float(accum[start + 6]), \
float(accum[start + 7]));
#define PRINT_ACCUM8_T0_L0(name, accum) PRINT_ACCUM8_T0_L0_START(name, accum, 0)
#define PRINT_FRAG_T0_L0(name, frag) \
{ \
auto typeStr = __get_type_name<decltype(frag)>(); \
PRINT_B0_T0("printing %s (%s)", name, typeStr.data); \
for (size_t _start = 0; _start < frag.size(); _start += 8) { \
PRINT_ACCUM8_T0_L0_START(" ", frag, _start); \
} \
/*__syncthreads(); \
NANCHECK(frag); */ \
}
#define PRINT_ARRAY_T0_L0_INCR(name, array, length, incr) \
{ \
PRINT_B0_T0("printing %s (len=%d)", name, int(length)); \
for (int _start = 0; _start < length; _start += incr) { \
PRINT_ACCUM8_T0_L0_START(" ", array, _start); \
} \
}
#define PRINT_ARRAY_T0_L0(name, array, length) \
PRINT_ARRAY_T0_L0_INCR(name, array, length, 8)
// Print a 4x4 matrix
#define PRINT_TENSOR4x4_T0_L0_START(name, ref, start_x, start_y) \
PRINT_B0_T0( \
"%s[%d:%d, %d:%d]:\n %f, %f, %f, %f\n %f, %f, %f, %f\n %f, %f, %f, %f\n %f, %f, %f, %f", \
name, \
int(start_x), \
int(start_x + 4), \
int(start_y), \
int(start_y + 4), \
float(ref.at({start_x + 0, start_y + 0})), \
float(ref.at({start_x + 0, start_y + 1})), \
float(ref.at({start_x + 0, start_y + 2})), \
float(ref.at({start_x + 0, start_y + 3})), \
float(ref.at({start_x + 1, start_y + 0})), \
float(ref.at({start_x + 1, start_y + 1})), \
float(ref.at({start_x + 1, start_y + 2})), \
float(ref.at({start_x + 1, start_y + 3})), \
float(ref.at({start_x + 2, start_y + 0})), \
float(ref.at({start_x + 2, start_y + 1})), \
float(ref.at({start_x + 2, start_y + 2})), \
float(ref.at({start_x + 2, start_y + 3})), \
float(ref.at({start_x + 3, start_y + 0})), \
float(ref.at({start_x + 3, start_y + 1})), \
float(ref.at({start_x + 3, start_y + 2})), \
float(ref.at({start_x + 3, start_y + 3})));
#define PRINT_TENSOR4x4_T0_L0(name, ref) \
PRINT_TENSOR4x4_T0_L0_START(name, ref, 0, 0)
#define PRINT_PROBLEM_SIZE(name, ps) \
PRINT_B0_T0( \
"%s.problem_size: {.m=%d, .n=%d, .k=%d}", \
name, \
int(ps.m()), \
int(ps.n()), \
int(ps.k()))
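// Illustrative usage sketch (an assumption): the printing helpers expect an indexable fragment
// (operator[]), a tensor ref exposing at({row, col}), or a GemmCoord-like problem size, e.g.
//
//   PRINT_FRAG_T0_L0("accum", accum_fragment);    // dumps a fragment, 8 values per line
//   PRINT_TENSOR4x4_T0_L0("tile", tensor_ref);    // prints the top-left 4x4 block
//   PRINT_PROBLEM_SIZE("gemm0", problem_size);    // prints {.m, .n, .k}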
template <typename LambdaIterator, typename LaneOffsetT, typename AccumT>
CUTLASS_DEVICE void print_warp_accum(
AccumT accum,
LaneOffsetT lane_offset,
int32_t num_rows,
int32_t num_cols) {
bool is_main = blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 &&
threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0;
for (int row = 0; row < num_rows; ++row) {
for (int col = 0; col < num_cols; ++col) {
if (col % 32 == 0) {
if (is_main) {
printf("\nmat[%3d, %3d:%3d]", row, col, col + 32);
}
__syncthreads();
}
LambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) {},
[&](int accum_m, int accum_n, int idx) {
if (row == accum_m && col == accum_n &&
(blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0)) {
printf(" %6.1f", float(accum[idx]));
}
},
[&](int accum_m) {});
__syncthreads();
}
if (is_main) {
printf("\n");
}
}
}
| examples/41_fused_multi_head_attention/debug_utils.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/debug_utils.h",
"repo_id": "examples",
"token_count": 7526
} | 4 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/functional.h"
#include "cutlass/gemm/warp/mma_simt_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/matrix_shape.h"
/*
Tensor cores use different accumulator layouts depending on the architecture.
This file provides iterator helpers that map the i-th element of an accumulator
fragment to its corresponding logical matrix row and column.
*/
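// Illustrative usage sketch (an assumption, loosely following how the fused attention kernels
// use these helpers): given a warp-level accumulator tile iterator `IteratorC` and its fragment
// `accum`, the lambda iterator visits every element together with its (row, col) coordinates,
// e.g. to build a per-row running maximum.
//
//   using LambdaIt =
//       typename DefaultMmaAccumLambdaIterator<IteratorC, accum_t, 32>::Iterator;
//   auto lane_offset = LambdaIt::get_lane_offset(lane_id, warp_id, tile_offset);
//   accum_t row_max = accum_t(0);  // simplification: assumes non-negative values
//   LambdaIt::iterateRows(
//       lane_offset,
//       [&](int accum_m) { row_max = accum_t(0); },                        // begin row
//       [&](int accum_m, int accum_n, int idx) {                           // per element
//         row_max = accum[idx] > row_max ? accum[idx] : row_max;
//       },
//       [&](int accum_m) { /* row_max holds this thread's partial max */ });  // end row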
template <typename T, typename accum_t, int kWarpSize>
struct AccumLambdaIteratorSm80 {
static_assert(
cutlass::platform::
is_same<typename T::Layout, cutlass::layout::RowMajor>::value,
"only RowMajor is supported");
using Policy = typename T::Policy;
using InstructionShape = typename T::InstructionShape;
using OpDelta = typename T::OpDelta;
using Shape = typename T::Shape;
static int const kElementsPerAccess = InstructionShape::kN / 4;
static int const kRowsPerTile = 8;
static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile;
static cutlass::MatrixCoord CUTLASS_DEVICE get_lane_offset(
int8_t lane_id,
int8_t warp_id,
typename T::TensorCoord const& tile_offset) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
return cutlass::MatrixCoord(
quad + tile_offset.row() * Shape::kRow,
lane_in_quad * kElementsPerAccess +
tile_offset.column() * Shape::kColumn);
}
template <typename FA, typename FB, typename FC>
CUTLASS_DEVICE static void iterateRows(
cutlass::MatrixCoord& lane_offset,
FA beginRow,
FB op,
FC endRow) {
// See cutlass/gemm/warp/mma_tensor_op_tile_iterator.h
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile + lane_offset.row();
beginRow(accum_m);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn +
col + lane_offset.column();
int idx = mma_accum_start + row * kElementsPerAccess + col;
op(accum_m, accum_n, idx);
}
}
endRow(accum_m);
}
}
}
template <typename DT, typename F>
CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn) {
// In each warp, 4 threads will work on the same row
// - the ones with the same `quad`
auto otherV = __shfl_xor_sync(0xffffffff, myValue, 1);
myValue = fn(myValue, otherV);
otherV = __shfl_xor_sync(0xffffffff, myValue, 2);
myValue = fn(myValue, otherV);
int lane_in_quad = (lane_id & 3);
return lane_in_quad == 0;
}
};
template <typename T, typename accum_t, int kWarpSize>
struct AccumLambdaIteratorSm70 {
static_assert(
cutlass::platform::
is_same<typename T::Layout, cutlass::layout::RowMajor>::value,
"only RowMajor is supported");
using Policy = typename T::Policy;
using InstructionShape = typename T::InstructionShape;
using OpDelta = typename T::OpDelta;
using Shape = typename T::Shape;
using Element = accum_t;
static int const kElementsPerPartial = 4;
using EleShapePerPatial = typename cutlass::platform::conditional<
cutlass::platform::is_same<Element, float>::value,
cutlass::MatrixShape<2, 2>,
cutlass::MatrixShape<1, 4>>::type;
static int const kElementsPerMma = 8;
static int const kAccumulatorPatials = 2;
using QuadShapePerPatialMma = cutlass::MatrixShape<4, 4>;
static cutlass::MatrixCoord CUTLASS_DEVICE get_lane_offset(
int8_t lane_id,
int8_t warp_id,
typename T::TensorCoord const& tile_offset) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
int accum_m, accum_n;
if (cutlass::platform::is_same<Element, float>::value) {
// (quad[2],quad[0])+lane_in_quad[0]
accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + (lane_in_quad & 1);
// (quad[1])+lane_in_quad[1]
accum_n =
((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials +
(lane_in_quad & 2);
} else {
accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 +
lane_in_quad; // (quad[2],quad[0])
accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials;
}
return cutlass::MatrixCoord(
accum_m + tile_offset.row() * Shape::kRow,
accum_n + tile_offset.column() * Shape::kColumn);
}
template <typename DT, typename F>
CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn) {
static_assert(
cutlass::platform::is_same<Element, float>::value,
"update to support non-float accum");
// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-fragment-mma-884-f16
// T0 & T2 share same line within a quad
auto otherV = __shfl_xor_sync(0xffffffff, myValue, 1 << 1);
myValue = fn(myValue, otherV);
// quad 0 and quad 2 are on the same lines
otherV = __shfl_xor_sync(0xffffffff, myValue, 1 << 3);
myValue = fn(myValue, otherV);
return (lane_id & ((1 << 1) | (1 << 3))) == 0;
}
template <typename FA, typename FB, typename FC>
CUTLASS_DEVICE static void iterateRows(
cutlass::MatrixCoord& lane_offset,
FA beginRow,
FB op,
FC endRow) {
CUTLASS_PRAGMA_UNROLL
for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < EleShapePerPatial::kRow; ++m) {
int accum_m = tile_m * Policy::InterleavedTile::kRow +
mma_m * QuadShapePerPatialMma::kRow + m * 2 + lane_offset.row();
beginRow(accum_m);
CUTLASS_PRAGMA_UNROLL
for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn;
++tile_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn;
++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < kAccumulatorPatials; ++p) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < EleShapePerPatial::kColumn; ++n) {
int mma_accum_start =
(((tile_n * Policy::TileIterations::kRow + tile_m) *
Policy::MmaIterations::kColumn +
mma_n) *
Policy::MmaIterations::kRow +
mma_m) *
kElementsPerMma;
int accum_n = tile_n * Policy::InterleavedTile::kColumn +
mma_n * QuadShapePerPatialMma::kColumn +
p * Policy::InterleavedTile::kColumn / 2 + n +
lane_offset.column();
int idx = mma_accum_start + p * kElementsPerPartial +
m * EleShapePerPatial::kColumn + n;
op(accum_m, accum_n, idx);
}
}
}
}
endRow(accum_m);
}
}
}
}
};
template <typename T, typename accum_t, int kWarpSize>
struct AccumLambdaIteratorSimt {
using Policy = typename T::Policy;
using Iterations = typename T::Iterations;
using Element = typename T::Element;
using Delta = typename T::Delta;
using Shape = typename T::Shape;
static_assert(
cutlass::platform::
is_same<typename T::Layout, cutlass::layout::RowMajor>::value,
"only RowMajor is supported");
template <typename DT, typename F>
CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn) {
CUTLASS_PRAGMA_UNROLL
for (int bit = 1; bit < Policy::WarpShape::kColumn; bit *= 2) {
auto otherV = __shfl_xor_sync(0xffffffff, myValue, bit);
myValue = fn(myValue, otherV);
}
return (lane_id & (Policy::WarpShape::kColumn - 1)) == 0;
}
template <typename FA, typename FB, typename FC>
CUTLASS_DEVICE static void iterateRows(
cutlass::MatrixCoord& lane_offset,
FA beginRow,
FB op,
FC endRow) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) {
int accum_m = mma_m * Delta::kRow + m + lane_offset.row();
beginRow(accum_m);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) {
int accum_n =
mma_n * Policy::WarpShape::kColumn * Policy::LaneMmaShape::kN +
lane_offset.column();
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::LaneMmaShape::kN; ++n) {
int idx = n +
Policy::LaneMmaShape::kN *
(mma_n +
Iterations::kColumn *
(m + mma_m * Policy::LaneMmaShape::kM));
op(accum_m, accum_n + n, idx);
}
}
endRow(accum_m);
}
}
}
static cutlass::MatrixCoord CUTLASS_DEVICE get_lane_offset(
int8_t lane_id,
int8_t warp_id,
typename T::TensorCoord const& tile_offset) {
static_assert(
cutlass::platform::is_same<
typename Policy::LaneLayout,
cutlass::layout::RowMajorInterleaved<1>>::value,
"");
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
cutlass::MatrixCoord lane_offset = lane_layout.inverse(lane_id) *
cutlass::MatrixCoord(Policy::LaneMmaShape::kM,
Policy::LaneMmaShape::kN);
return lane_offset +
tile_offset * cutlass::MatrixCoord(Shape::kRow, Shape::kColumn);
}
};
template <typename T, typename accum_t, int kWarpSize>
struct DefaultMmaAccumLambdaIterator;
// Simt
template <typename S, typename P, typename accum_t, int kWarpSize>
struct DefaultMmaAccumLambdaIterator<
cutlass::gemm::warp::MmaSimtTileIterator<
S,
cutlass::gemm::Operand::kC,
accum_t,
cutlass::layout::RowMajor,
P,
1,
1>,
accum_t,
kWarpSize> {
using WarpIterator = typename cutlass::gemm::warp::MmaSimtTileIterator<
S,
cutlass::gemm::Operand::kC,
accum_t,
cutlass::layout::RowMajor,
P,
1,
1>;
using Iterator = AccumLambdaIteratorSimt<WarpIterator, accum_t, kWarpSize>;
};
// TensorOp - Volta
template <typename S1, typename S2, typename accum_t, int kWarpSize>
struct DefaultMmaAccumLambdaIterator<
cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator<
S1,
accum_t,
cutlass::layout::RowMajor,
S2,
cutlass::MatrixShape<1, 1>>,
accum_t,
kWarpSize> {
using WarpIterator =
typename cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator<
S1,
accum_t,
cutlass::layout::RowMajor,
S2,
cutlass::MatrixShape<1, 1>>;
using Iterator = AccumLambdaIteratorSm70<WarpIterator, accum_t, kWarpSize>;
};
// TensorOp - Sm75+
template <
typename S1,
typename S2,
typename S3,
typename accum_t,
int kWarpSize>
struct DefaultMmaAccumLambdaIterator<
cutlass::gemm::warp::MmaTensorOpAccumulatorTileIterator<
S1,
accum_t,
cutlass::layout::RowMajor,
S2,
S3>,
accum_t,
kWarpSize> {
using WarpIterator =
typename cutlass::gemm::warp::MmaTensorOpAccumulatorTileIterator<
S1,
accum_t,
cutlass::layout::RowMajor,
S2,
S3>;
using Iterator = AccumLambdaIteratorSm80<WarpIterator, accum_t, kWarpSize>;
};
| examples/41_fused_multi_head_attention/gemm/mma_accum_lambda_iterator.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/gemm/mma_accum_lambda_iterator.h",
"repo_id": "examples",
"token_count": 6214
} | 5 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import helper
import gen_ir as ir
class gen_test:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.gen_class_name = gen_class_name
self.user_header_file = user_header_file
self.sample_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
def gen_cpp_sample(self):
code = "/* Auto Generated code - Do not edit.*/\n"
code += "#include <stdio.h> \n"
code += "#include \"cutlass/gemm/device/gemm_batched.h\" \n"
code += "#include \"cutlass/cutlass.h\" \n"
code += "#include \"../cutlass_irrelevant.h\" \n"
code += "#include \"../cutlass_verify.h\" \n"
code += "#include \"leaky_bias.h\" \n"
code += "#include \"utils.h\" \n"
code += "int main(int args, char * argv[]) {\n"
code += " " + "int M = atoi(argv[1]);\n"
code += " " + "int K0 = " + str(self.fuse_gemm_info[0]['mnk'][0]) + ";\n"
code += " " + "if(args == 3);\n"
code += " " + " " + "K0 = atoi(argv[2]);\n"
code += " " + "int B = 1;\n"
code += " " + "if(args == 4);\n"
code += " " + " " + "B = atoi(argv[3]);\n"
code += " " + "srand(1234UL);\n"
code += " " + "int device_id = 0;\n"
code += " " + "cudaGetDevice(&device_id);\n"
code += " " + "cudaDeviceProp prop;\n"
code += " " + "cudaGetDeviceProperties(&prop, device_id);\n"
code += " " + "int sm = prop.major *10 + prop.minor;\n"
code += "using ElementCompute = cutlass::half_t;\n"
for i in range(self.b2b_num):
code += " " + helper.var_idx("ElementCompute alpha", i) + " = ElementCompute(1);\n"
addbias = helper.get_epilogue_add_bias_or_not( self.fuse_gemm_info[i])
if addbias:
code += " " + helper.var_idx("ElementCompute beta", i) + " = ElementCompute(1);\n"
else:
code += " " + helper.var_idx("ElementCompute beta", i) + " = ElementCompute(0);\n"
code += " " + "size_t flops = 0;\n"
for i in range(self.b2b_num):
m = self.fuse_gemm_info[i]['mnk'][0]
n = self.fuse_gemm_info[i]['mnk'][1]
k = self.fuse_gemm_info[i]['mnk'][2]
bias_shape = helper.get_epilogue_bias_shape(self.fuse_gemm_info[i])
this_k = "K0"
if (i > 0):
this_k = str(k)
code += " " + "flops += size_t(2) * size_t(M) * size_t(B) * " + "size_t(" + str(n) + ") * size_t(" + this_k + ");\n"
code += " " + helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + "(" + "M" + ", " + str(n) + ", " + this_k + ");\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_A", i) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".m() * problem_size_", i) + ".k());\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_B", i) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".n() * problem_size_", i) + ".k());\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_C", i) + "(B * " + str(bias_shape[0]) + " * " + str(bias_shape[1]) + ");\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_D_cutlass_ref", i) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".m() * problem_size_", i) + ".n());\n"
code += " " + helper.var_idx("Mat_A", i) + ".init();\n"
code += " " + helper.var_idx("Mat_B", i) + ".init();\n"
code += " " + helper.var_idx("Mat_C", i) + ".init();\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_D", self.b2b_num - 1) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".m() * problem_size_",self.b2b_num - 1) + ".n());\n"
params = []
params.append("M")
params.append("B")
params.append("Mat_A0.device_ptr")
for i in range(self.b2b_num):
params.append(helper.var_idx("Mat_B", i) + ".device_ptr")
params.append(helper.var_idx("Mat_C", i) + ".device_ptr")
if i != self.b2b_num-1:
params.append(helper.var_idx("Mat_D_cutlass_ref", i) + ".device_ptr")
params.append(helper.var_idx("Mat_D", self.b2b_num - 1) + ".device_ptr")
code += " " + "Param arguments = {\n"
code += " " + " " + "M,\n"
code += " " + " " + "K0,\n"
code += " " + " " + "B,\n"
code += " " + " " + "reinterpret_cast<const void*>(Mat_A0.device_ptr),\n"
cnt = 1
for i in range(self.b2b_num):
bias_flag = helper.get_epilogue_add_bias_or_not( self.fuse_gemm_info[i])
code += " " + " " + "reinterpret_cast<const void*>(" + helper.var_idx("Mat_B", i) + ".device_ptr" + "),\n"
cnt += 1
if bias_flag:
code += " " + " " + "reinterpret_cast<const void*>(" + helper.var_idx("Mat_C", i) + ".device_ptr" + "),\n"
cnt += 1
else:
code += " " + " " + "reinterpret_cast<const void*>(NULL),\n"
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_value = str(arg[2])
code += " " + " " + helper.type_2_cutlass_type(acc_tp) + "(" + arg_value + "),\n"
if i != self.b2b_num - 1:
code += " " + " " + "reinterpret_cast<void*>(" + helper.var_idx("Mat_D_cutlass_ref", i) + ".device_ptr" + "),\n"
else:
code += " " + " " + "reinterpret_cast<void*>(" + helper.var_idx("Mat_D", i) + ".device_ptr" + ")};\n"
code += " " + "TI(FUSED_CUTLASS);\n"
code += " " + "for(int i = 0; i < 100; i++){\n"
code += " " + " " + "one_api(arguments, sm, NULL);\n"
code += " " + "}\n"
code += " " + "TO(FUSED_CUTLASS, \"FUSED_CUTLASS\", 100);\n"
code += "\n"
for i in range(self.b2b_num):
code_this = ""
N_str = str(self.fuse_gemm_info[i]['mnk'][1])
code_this += " " + helper.var_idx("typename Gemm", i) + helper.var_idx("::Arguments arguments_", i) + "{\n"
code_this += " " + " " + helper.var_idx("problem_size_", i) + ",\n"
ldmA = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
ldmA = "K0"
ldmB = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
ldmB = "K0"
ldmC = str(self.fuse_gemm_info[i]['mnk'][1])
ldmBias = str(helper.get_epilogue_bias_ldm(self.fuse_gemm_info[i]))
            if self.fuse_gemm_info[i]['A_format'] == 'Col':
ldmA = "M"
            if self.fuse_gemm_info[i]['B_format'] == 'Row':
ldmB = str(self.fuse_gemm_info[i]['mnk'][1])
            if self.fuse_gemm_info[i]['C_format'] == 'Col':
ldmC = "M"
if i == 0:
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("Mat_A", i) + ".device_ptr), " + ldmA + "}, " + "M * " + ldmA + ",\n"
else:
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("Mat_D_cutlass_ref", i - 1) + ".device_ptr), " + ldmA + "}, " + "M * " + ldmA + ",\n"
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + "*>(" + helper.var_idx("Mat_B", i) + ".device_ptr), " + ldmB + "}, " + N_str + " * " + ldmB + ",\n"
M_bias = str(helper.get_epilogue_bias_shape(self.fuse_gemm_info[i])[0])
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("Mat_C", i) + ".device_ptr), " + ldmBias + "}, " + M_bias + " * " + N_str + ",\n"
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("Mat_D_cutlass_ref", i) + ".device_ptr), " + ldmC + "}, " + "M * " + ldmC + ",\n"
code_this += " " + " " + "{ " + helper.var_idx("alpha", i) + ", " + helper.var_idx("beta", i)
for epilogue_arg in helper.get_epilogue_args(self.fuse_gemm_info[i]):
arg_value = str(epilogue_arg[2])
code_this += ", " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(" + str(arg_value) + ")"
code_this += " " + " },\n"
code_this += " " + " " + "B};\n"
code += code_this
code += " " + "TI(UNFUSED_CUTLASS);\n"
code += " " + "for(int i = 0; i < 100; i++){\n"
code += " " + " " + self.gen_class_name + "_verify(\n"
for i in range(self.b2b_num):
code += " " + " " + " " + helper.var_idx("arguments_", i) + ",\n"
code += " " + " " + " " + "NULL);\n"
code += " " + "}\n"
code += " " + "TO(UNFUSED_CUTLASS, \"UNFUSED_CUTLASS\", 100);\n"
code += " " + helper.var_idx("Mat_D_cutlass_ref", self.b2b_num - 1) + ".d2h();\n"
code += " " + helper.var_idx("Mat_D", self.b2b_num - 1) + ".d2h();\n"
code += " " + helper.var_idx("check_result(Mat_D_cutlass_ref", self.b2b_num - 1) + helper.var_idx(".host_ptr, Mat_D", self.b2b_num - 1) \
+ helper.var_idx(".host_ptr, Mat_D", self.b2b_num - 1) + ".elements);\n"
code += "\n\n}\n"
with open(self.sample_dir + "sample.cu", "w+") as f:
f.write(code)
| examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_sample.py/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_sample.py",
"repo_id": "examples",
"token_count": 5946
} | 6 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Functor applying a SiLU activation to the left operand and multiplying it by the right operand, used in dual-GEMM epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/scale_type.h"
#include "cutlass/epilogue/thread/linear_combination_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies an elementwise SiLU-and-multiply operator to a pair of accumulator fragments.
///
/// D = SiLU(lhs) * rhs
///
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation.
///< Usually it is 128/sizeof_bits<ElementOutput_>,
///< but we use 64 or 32 sometimes when there are not enough data to store
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
class LeftSiLUAndMul {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
static int const kCount = Count;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using ComputeFragment = Array<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
struct Params{};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LeftSiLUAndMul(Params const &/*params*/) {}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
return true;
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
assert(false);
}
  /// Computes the elementwise operation: D = SiLU(lhs) * rhs
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &lhs,
FragmentAccumulator const &rhs) const {
    // Convert accumulators to the internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_to_compute;
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> compute_to_output;
ComputeFragment converted_lhs = accumulator_to_compute(lhs);
ComputeFragment converted_rhs = accumulator_to_compute(rhs);
cutlass::epilogue::thread::SiLu<ComputeFragment> silu;
cutlass::multiplies<ComputeFragment> mul;
auto silu_lhs = silu(converted_lhs);
return compute_to_output(mul(silu_lhs, converted_rhs));
}
CUTLASS_HOST_DEVICE
ElementOutput operator()(
ElementAccumulator const& lhs,
ElementAccumulator const& rhs
) const {
ElementCompute convert_lhs(lhs);
ElementCompute convert_rhs(rhs);
cutlass::epilogue::thread::SiLu<ElementCompute> silu;
cutlass::multiplies<ElementCompute> mul;
auto silu_lhs = silu(convert_lhs);
return ElementOutput(mul(silu_lhs, convert_rhs));
}
};
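// Illustrative usage sketch (an assumption; the element types and vector width below are
// examples only): the functor is applied elementwise to the two accumulator fragments produced
// by the dual GEMM for the same output tile.
//
//   using Op = cutlass::epilogue::thread::LeftSiLUAndMul<cutlass::half_t, 8, float, float>;
//   Op op(Op::Params{});
//   Op::FragmentAccumulator acc0, acc1;     // results of GEMM0 and GEMM1
//   Op::FragmentOutput d = op(acc0, acc1);  // d[i] = SiLU(acc0[i]) * acc1[i]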
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/45_dual_gemm/thread/left_silu_and_mul.h/0 | {
"file_path": "examples/45_dual_gemm/thread/left_silu_and_mul.h",
"repo_id": "examples",
"token_count": 1804
} | 7 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cuda_runtime.h"
#include <iostream>
/**
* Panic wrapper for unwinding CUTLASS errors
*/
#define CUTLASS_CHECK(status) \
{ \
cutlass::Status error = status; \
if (error != cutlass::Status::kSuccess) { \
std::cerr << "Got cutlass error: " << cutlassGetStatusString(error) << " at: " << __LINE__ \
<< std::endl; \
exit(EXIT_FAILURE); \
} \
}
/**
* Panic wrapper for unwinding CUDA runtime errors
*/
#define CUDA_CHECK(status) \
{ \
cudaError_t error = status; \
if (error != cudaSuccess) { \
std::cerr << "Got bad cuda status: " << cudaGetErrorString(error) \
<< " at line: " << __LINE__ << std::endl; \
exit(EXIT_FAILURE); \
} \
}
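// Illustrative usage sketch (an assumption): both macros wrap a status-returning call and abort
// with a diagnostic on failure, e.g.
//
//   CUDA_CHECK(cudaMalloc(&workspace, workspace_bytes)); // aborts if the allocation fails
//   CUTLASS_CHECK(gemm_op.initialize(arguments));        // aborts if initialization fails
//   CUTLASS_CHECK(gemm_op());                            // aborts if the launch reports an error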
/**
 * GPU timer for recording the elapsed time across kernel(s) launched in a GPU stream
*/
struct GpuTimer
{
cudaStream_t _stream_id;
cudaEvent_t _start;
cudaEvent_t _stop;
/// Constructor
GpuTimer() : _stream_id(0)
{
CUDA_CHECK(cudaEventCreate(&_start));
CUDA_CHECK(cudaEventCreate(&_stop));
}
/// Destructor
~GpuTimer()
{
CUDA_CHECK(cudaEventDestroy(_start));
CUDA_CHECK(cudaEventDestroy(_stop));
}
/// Start the timer for a given stream (defaults to the default stream)
void start(cudaStream_t stream_id = 0)
{
_stream_id = stream_id;
CUDA_CHECK(cudaEventRecord(_start, _stream_id));
}
/// Stop the timer
void stop()
{
CUDA_CHECK(cudaEventRecord(_stop, _stream_id));
}
/// Return the elapsed time (in milliseconds)
float elapsed_millis()
{
float elapsed = 0.0;
CUDA_CHECK(cudaEventSynchronize(_stop));
CUDA_CHECK(cudaEventElapsedTime(&elapsed, _start, _stop));
return elapsed;
}
};
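// Illustrative usage sketch (an assumption; `my_kernel`, `grid`, `block` and `stream` are
// placeholders): time a sequence of kernel launches on one stream and report average latency.
//
//   GpuTimer timer;
//   timer.start(stream);
//   for (int i = 0; i < iterations; ++i) {
//     my_kernel<<<grid, block, 0, stream>>>(args);
//   }
//   timer.stop();
//   float avg_ms = timer.elapsed_millis() / float(iterations);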
| examples/common/helper.h/0 | {
"file_path": "examples/common/helper.h",
"repo_id": "examples",
"token_count": 2059
} | 8 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/arch/copy.hpp>
// Config
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) && (__CUDACC_VER_MAJOR__ >= 12))
# define CUTE_ARCH_STSM_SM90_ENABLED
# define CUTE_ARCH_TMA_SM90_ENABLED
#endif
#if defined(CUTE_ARCH_TMA_SM90_ENABLED) && \
((__CUDACC_VER_MAJOR__ > 12) || ((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ >= 3)))
# define CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED
#endif
namespace cute
{
struct SM90_U32x1_STSM_N
{
using SRegisters = uint32_t[1];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src,
uint128_t & smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x1.m8n8.shared.b16 [%0], {%1};\n"
:: "r"(smem_int_ptr),
"r"(src));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
struct SM90_U32x2_STSM_N
{
using SRegisters = uint32_t[2];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src0, uint32_t const& src1,
uint128_t& smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x2.m8n8.shared.b16 [%0], {%1, %2};\n"
:: "r"(smem_int_ptr),
"r"(src0), "r"(src1));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
struct SM90_U32x4_STSM_N
{
using SRegisters = uint32_t[4];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src0, uint32_t const& src1, uint32_t const& src2, uint32_t const& src3,
uint128_t& smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x4.m8n8.shared.b16 [%0], {%1, %2, %3, %4};\n"
:: "r"(smem_int_ptr),
"r"(src0), "r"(src1), "r"(src2), "r"(src3));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
struct SM90_U16x2_STSM_T
{
using SRegisters = uint32_t[1];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src,
uint128_t& smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x1.trans.m8n8.shared.b16 [%0], {%1};\n"
:: "r"(smem_int_ptr),
"r"(src));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
struct SM90_U16x4_STSM_T
{
using SRegisters = uint32_t[2];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src0, uint32_t const& src1,
uint128_t& smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x2.trans.m8n8.shared.b16 [%0], {%1, %2};\n"
:: "r"(smem_int_ptr),
"r"(src0), "r"(src1));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
struct SM90_U16x8_STSM_T
{
using SRegisters = uint32_t[4];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src0, uint32_t const& src1, uint32_t const& src2, uint32_t const& src3,
uint128_t& smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x4.trans.m8n8.shared.b16 [%0], {%1, %2, %3, %4};\n"
:: "r"(smem_int_ptr),
"r"(src0), "r"(src1), "r"(src2), "r"(src3));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
//
// Legacy STSM interfaces that aren't very useful
//
template <class T>
CUTE_HOST_DEVICE
void
copy_stsm(T const* const rmem_ptr,
uint128_t* const smem_ptr)
{
uint32_t const* reg_ptr = reinterpret_cast<uint32_t const*>(rmem_ptr);
// if constexpr
if (sizeof(T) == 4) {
SM90_U32x1_STSM_N::copy(reg_ptr[0], smem_ptr[0]);
}
else if (sizeof(T) == 8) {
SM90_U32x2_STSM_N::copy(reg_ptr[0], reg_ptr[1], smem_ptr[0]);
}
else if (sizeof(T) == 16) {
SM90_U32x4_STSM_N::copy(reg_ptr[0], reg_ptr[1], reg_ptr[2], reg_ptr[3], smem_ptr[0]);
}
else {
static_assert(sizeof(T) == 4 || sizeof(T) == 8 || sizeof(T) == 16, "sizeof(T) is not supported");
}
}
template <class T>
CUTE_HOST_DEVICE
void
copy_stsm_trans(T const* const rmem_ptr,
uint128_t* const smem_ptr)
{
uint32_t const* reg_ptr = reinterpret_cast<uint32_t const*>(rmem_ptr);
// if constexpr
if (sizeof(T) == 4) {
SM90_U16x2_STSM_T::copy(reg_ptr[0], smem_ptr[0]);
}
else if (sizeof(T) == 8) {
SM90_U16x4_STSM_T::copy(reg_ptr[0], reg_ptr[1], smem_ptr[0]);
}
else if (sizeof(T) == 16) {
SM90_U16x8_STSM_T::copy(reg_ptr[0], reg_ptr[1], reg_ptr[2], reg_ptr[3], smem_ptr[0]);
}
else {
static_assert(sizeof(T) == 4 || sizeof(T) == 8 || sizeof(T) == 16, "sizeof(T) is not supported");
}
}
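// Usage sketch (hypothetical): copy_stsm()/copy_stsm_trans() simply dispatch on sizeof(T).
// A thread holding a 16-byte register fragment of b16 data would take the x4 path:
//
//   cute::uint128_t* smem_addr = /* per-thread address obeying stmatrix row-addressing rules */;
//   Frag frag = /* assumed 16-byte fragment, i.e. four packed uint32_t */;
//   cute::copy_stsm(&frag, smem_addr);   // -> SM90_U32x4_STSM_N::copy
//
// In practice these ops are used through cute::Copy_Atom / TiledCopy rather than called directly.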
////////////////////////////////////////////////////////////////////////////////////////////////////
} // end namespace cute
////////////////////////////////////////////////////////////////////////////////////////////////////
#include <cute/arch/copy_sm90_desc.hpp>
#include <cute/arch/copy_sm90_tma.hpp>
////////////////////////////////////////////////////////////////////////////////////////////////////
| include/cute/arch/copy_sm90.hpp/0 | {
"file_path": "include/cute/arch/copy_sm90.hpp",
"repo_id": "include",
"token_count": 3157
} | 9 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/arch/copy_sm80.hpp>
#include <cute/atom/copy_traits.hpp>
#include <cute/layout.hpp>
namespace cute
{
template <class S, class D>
struct Copy_Traits<SM80_CP_ASYNC_CACHEALWAYS<S,D>>
{
// Logical thread id to thread idx (one-thread)
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// Construct a zfill variant with a given predicate value
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM80_CP_ASYNC_CACHEALWAYS_ZFILL<S,D>>
with(bool pred) const {
return {pred};
}
};
template <class S, class D>
struct Copy_Traits<SM80_CP_ASYNC_CACHEGLOBAL<S,D>>
{
// Logical thread id to thread idx (one-thread)
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// Construct a zfill variant with a given predicate value
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM80_CP_ASYNC_CACHEGLOBAL_ZFILL<S,D>>
with(bool pred) const {
return {pred};
}
};
template <class S, class D>
struct Copy_Traits<SM80_CP_ASYNC_CACHEALWAYS_ZFILL<S,D>>
{
// Logical thread id to thread idx (one-thread)
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// Predicate value that determines whether to load or zfill
bool pred = false;
// Overload copy_unpack for zfill variant to pass the predicate into the op
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr
void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
static_assert(is_gmem<TS>::value, "Expected gmem source for cp.async.");
static_assert(is_smem<TD>::value, "Expected smem destination for cp.async.");
Tensor rS = recast<S>(src);
Tensor rD = recast<D>(dst);
CUTE_STATIC_ASSERT_V(size(rS) == Int<1>{},
"In CopyAtom, src layout doesn't vectorize into registers. This src layout is incompatible with this tiled copy.");
CUTE_STATIC_ASSERT_V(size(rD) == Int<1>{},
"In CopyAtom, dst layout doesn't vectorize into registers. This dst layout is incompatible with this tiled copy.");
SM80_CP_ASYNC_CACHEALWAYS_ZFILL<S,D>::copy(rS[0], rD[0], traits.pred);
}
};
template <class S, class D>
struct Copy_Traits<SM80_CP_ASYNC_CACHEGLOBAL_ZFILL<S,D>>
{
// Logical thread id to thread idx (one-thread)
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// Predicate value that determines whether to load or zfill
bool pred = false;
// Overload copy_unpack for zfill variant to pass the predicate into the op
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr
void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
static_assert(is_gmem<TS>::value, "Expected gmem source for cp.async.");
static_assert(is_smem<TD>::value, "Expected smem destination for cp.async.");
Tensor rS = recast<S>(src);
Tensor rD = recast<D>(dst);
CUTE_STATIC_ASSERT_V(size(rS) == Int<1>{},
"In CopyAtom, src layout doesn't vectorize into registers. This src layout is incompatible with this tiled copy.");
CUTE_STATIC_ASSERT_V(size(rD) == Int<1>{},
"In CopyAtom, dst layout doesn't vectorize into registers. This dst layout is incompatible with this tiled copy.");
SM80_CP_ASYNC_CACHEGLOBAL_ZFILL<S,D>::copy(rS[0], rD[0], traits.pred);
}
};
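// Usage sketch (hypothetical): the _ZFILL variants are normally reached by calling
// .with(pred) on a cp.async Copy_Atom, so that predicated-off tiles are zero-filled
// in shared memory instead of loaded. Roughly:
//
//   auto atom = cute::Copy_Atom<cute::SM80_CP_ASYNC_CACHEGLOBAL_ZFILL<cute::uint128_t>, Element>{};
//   cute::copy(atom.with(guard), tAgA, tAsA);   // guard == false => destination is zero-filled
//
// Names such as guard/tAgA/tAsA are placeholders; per the static asserts above, the
// partitioned tensors must vectorize to a single value of type S/D per thread.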
////////////////////////////////////////////////////////////////////////////////////////////////////
// Element copy selector
template <class SrcTensor, class DstTensor>
CUTE_HOST_DEVICE constexpr
auto
select_elementwise_copy(SrcTensor const&, DstTensor const&)
{
using SrcType = typename SrcTensor::value_type;
using DstType = typename DstTensor::value_type;
#if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED)
if constexpr (is_gmem<SrcTensor>::value && is_smem<DstTensor>::value &&
sizeof(SrcType) == sizeof(DstType) &&
(sizeof(SrcType) == 4 || sizeof(SrcType) == 8 || sizeof(SrcType) == 16))
{
return SM80_CP_ASYNC_CACHEALWAYS<SrcType,DstType>{};
} else {
return UniversalCopy<SrcType,DstType>{};
}
CUTE_GCC_UNREACHABLE;
#else
return UniversalCopy<SrcType,DstType>{};
#endif
}
}
| include/cute/atom/copy_traits_sm80.hpp/0 | {
"file_path": "include/cute/atom/copy_traits_sm80.hpp",
"repo_id": "include",
"token_count": 2594
} | 10 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp>
#include <cutlass/fast_math.h>
namespace cute
{
//
// Common Operations
//
template <class T, class U,
__CUTE_REQUIRES(is_arithmetic<T>::value &&
is_arithmetic<U>::value)>
CUTE_HOST_DEVICE constexpr
auto
max(T const& t, U const& u) {
return t < u ? u : t;
}
template <class T, class U,
__CUTE_REQUIRES(is_arithmetic<T>::value &&
is_arithmetic<U>::value)>
CUTE_HOST_DEVICE constexpr
auto
min(T const& t, U const& u) {
return t < u ? t : u;
}
template <class T,
__CUTE_REQUIRES(is_arithmetic<T>::value)>
CUTE_HOST_DEVICE constexpr
auto
abs(T const& t) {
if constexpr (is_signed<T>::value) {
return t < T(0) ? -t : t;
} else {
return t;
}
CUTE_GCC_UNREACHABLE;
}
// Returns 1 if x > 0, -1 if x < 0, and 0 if x is zero.
template <class T,
__CUTE_REQUIRES(is_arithmetic<T>::value)>
CUTE_HOST_DEVICE constexpr
int
signum(T const& x) {
if constexpr (is_signed<T>::value) {
return (T(0) < x) - (x < T(0));
} else {
return T(0) < x;
}
CUTE_GCC_UNREACHABLE;
}
//
// C++17 <numeric> operations
//
// Greatest common divisor of two positive integers
template <class T, class U,
__CUTE_REQUIRES(is_std_integral<T>::value &&
is_std_integral<U>::value)>
CUTE_HOST_DEVICE constexpr
cute::common_type_t<T, U>
gcd(T t, U u) {
while (true) {
if (t == 0) { return u; }
u %= t;
if (u == 0) { return t; }
t %= u;
}
}
// Least common multiple of two positive integers
template <class T, class U,
__CUTE_REQUIRES(is_std_integral<T>::value &&
is_std_integral<U>::value)>
CUTE_HOST_DEVICE constexpr
cute::common_type_t<T, U>
lcm(T const& t, U const& u) {
return (t / gcd(t,u)) * u;
}
//
// C++20 <bit> operations
//
// Checks if a number is an integral power of two
template <class T>
CUTE_HOST_DEVICE constexpr
bool
has_single_bit(T x) {
return x != 0 && (x & (x - 1)) == 0;
}
// Smallest number of bits needed to represent the given value
// For x == 0, this is 0
// For x != 0, this is 1 + floor(log2(x))
// bit_width( 0b0000 ) = 0
// bit_width( 0b0001 ) = 1
// bit_width( 0b0010 ) = 2
// bit_width( 0b0011 ) = 2
// bit_width( 0b0100 ) = 3
// bit_width( 0b0101 ) = 3
// bit_width( 0b0110 ) = 3
// bit_width( 0b0111 ) = 3
template <class T>
CUTE_HOST_DEVICE constexpr
T
bit_width(T x) {
static_assert(is_unsigned<T>::value, "Only to be used for unsigned types.");
constexpr int N = (numeric_limits<T>::digits == 64 ? 6 :
(numeric_limits<T>::digits == 32 ? 5 :
(numeric_limits<T>::digits == 16 ? 4 :
(numeric_limits<T>::digits == 8 ? 3 : (assert(false),0)))));
T r = 0;
for (int i = N - 1; i >= 0; --i) {
T shift = (x > ((T(1) << (T(1) << i))-1)) << i;
x >>= shift;
r |= shift;
}
return r + (x != 0);
}
// Smallest integral power of two not less than the given value
// bit_ceil( 0b00000000 ) = 0b00000001
// bit_ceil( 0b00000001 ) = 0b00000001
// bit_ceil( 0b00000010 ) = 0b00000010
// bit_ceil( 0b00000011 ) = 0b00000100
// bit_ceil( 0b00000100 ) = 0b00000100
// bit_ceil( 0b00000101 ) = 0b00001000
// bit_ceil( 0b00000110 ) = 0b00001000
// bit_ceil( 0b00000111 ) = 0b00001000
// bit_ceil( 0b00001000 ) = 0b00001000
// bit_ceil( 0b00001001 ) = 0b00010000
template <class T>
CUTE_HOST_DEVICE constexpr
T
bit_ceil(T x) {
return x == 0 ? T(1) : (T(1) << bit_width(x - 1));
}
// Largest integral power of two not greater than the given value
// bit_floor( 0b00000000 ) = 0b00000000
// bit_floor( 0b00000001 ) = 0b00000001
// bit_floor( 0b00000010 ) = 0b00000010
// bit_floor( 0b00000011 ) = 0b00000010
// bit_floor( 0b00000100 ) = 0b00000100
// bit_floor( 0b00000101 ) = 0b00000100
// bit_floor( 0b00000110 ) = 0b00000100
// bit_floor( 0b00000111 ) = 0b00000100
// bit_floor( 0b00001000 ) = 0b00001000
// bit_floor( 0b00001001 ) = 0b00001000
template <class T>
CUTE_HOST_DEVICE constexpr
T
bit_floor(T x) {
return x == 0 ? 0 : (T(1) << (bit_width(x) - 1));
}
template <class T>
CUTE_HOST_DEVICE constexpr T rotl(T x, int s);
template <class T>
CUTE_HOST_DEVICE constexpr T rotr(T x, int s);
// Computes the result of circular bitwise left-rotation
template <class T>
CUTE_HOST_DEVICE constexpr
T
rotl(T x, int s) {
constexpr int N = numeric_limits<T>::digits;
return static_cast<T>(s == 0 ? x : s > 0 ? (x << s) | (x >> (N - s)) : rotr(x, -s));
}
// Computes the result of circular bitwise right-rotation
template <class T>
CUTE_HOST_DEVICE constexpr
T
rotr(T x, int s) {
constexpr int N = numeric_limits<T>::digits;
return static_cast<T>(s == 0 ? x : s > 0 ? (x >> s) | (x << (N - s)) : rotl(x, -s));
}
// Counts the number of consecutive 0 bits, starting from the most significant bit
// countl_zero( 0b00000000 ) = 8
// countl_zero( 0b11111111 ) = 0
// countl_zero( 0b00011100 ) = 3
template <class T>
CUTE_HOST_DEVICE constexpr
T
countl_zero(T x) {
return numeric_limits<T>::digits - bit_width(x);
}
// Counts the number of consecutive 1 bits, starting from the most significant bit
// countl_one( 0b00000000 ) = 0
// countl_one( 0b11111111 ) = 8
// countl_one( 0b11100011 ) = 3
template <class T>
CUTE_HOST_DEVICE constexpr
T
countl_one(T x) {
return countl_zero(~x);
}
// Counts the number of consecutive 0 bits, starting from the least significant bit
// countr_zero( 0b00000000 ) = 8
// countr_zero( 0b11111111 ) = 0
// countr_zero( 0b00011100 ) = 2
template <class T>
CUTE_HOST_DEVICE constexpr
T
countr_zero(T x) {
return x == 0 ? numeric_limits<T>::digits : bit_width(T(x & T(-x))) - 1; // bit_width of the LSB
}
// Counts the number of consecutive 1 bits, starting from the least significant bit
// countr_one( 0b00000000 ) = 0
// countr_one( 0b11111111 ) = 8
// countr_one( 0b11100011 ) = 2
template <class T>
CUTE_HOST_DEVICE constexpr
T
countr_one(T x) {
return countr_zero(~x);
}
// Counts the number of 1 bits in an unsigned integer
// popcount( 0b00000000 ) = 0
// popcount( 0b11111111 ) = 8
// popcount( 0b00011101 ) = 4
template <class T>
CUTE_HOST_DEVICE constexpr
int
popcount(T x) {
int c = 0;
while (x) {
++c;
x &= x - 1; // clear the least significant bit set
}
return c;
}
//
// Custom operations
//
// Computes the result of bitwise left-shift
template <class T>
CUTE_HOST_DEVICE constexpr
T
shiftl(T x, int s) {
return s >= 0 ? (x << s) : (x >> -s);
}
// Computes the result of bitwise right-shift
template <class T>
CUTE_HOST_DEVICE constexpr
T
shiftr(T x, int s) {
return s >= 0 ? (x >> s) : (x << -s);
}
// Safe divide
// @pre t % u == 0
// @result t / u
template <class T, class U,
__CUTE_REQUIRES(is_std_integral<T>::value &&
is_std_integral<U>::value)>
CUTE_HOST_DEVICE constexpr
auto
safe_div(T const& t, U const& u) {
//assert(t % u == 0);
return t / u;
}
/**
* log2 computation
*/
template <class T>
CUTE_HOST_DEVICE constexpr
int32_t
log_2(T x) {
assert(x > 0);
static_assert(is_unsigned<T>::value, "Only to be used for unsigned integral types.");
return static_cast<int32_t>(bit_width(x)) - 1;
}
template <class IntDiv, class IntMod>
struct DivModReturnType {
IntDiv div_;
IntMod mod_;
CUTE_HOST_DEVICE constexpr
DivModReturnType(IntDiv const& div, IntMod const& mod) : div_(div), mod_(mod) {}
};
// General divmod
template <class CInt0, class CInt1>
CUTE_HOST_DEVICE constexpr
auto
divmod(CInt0 const& a, CInt1 const& b) {
return DivModReturnType{a / b, a % b};
}
// Specialized function with fastDivmod input
template <class CInt>
CUTE_HOST_DEVICE constexpr
auto
divmod(CInt const& a, cutlass::FastDivmod const& b) {
using val_div_type = typename cutlass::FastDivmod::value_div_type;
using val_mod_type = typename cutlass::FastDivmod::value_mod_type;
val_div_type div = 0;
val_mod_type mod = 0;
b(div, mod, a);
return DivModReturnType{div, mod};
}
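// Usage sketch (hypothetical): both overloads return a {div_, mod_} pair, so callers can
// decompose a linear index with plain integers or with a precomputed cutlass::FastDivmod.
//
//   cutlass::FastDivmod divmod_n(128);         // precompute multiply+shift constants once
//   auto qr = cute::divmod(idx, divmod_n);     // fast path: no hardware integer divide
//   auto q  = qr.div_;                         // idx / 128
//   auto r  = qr.mod_;                         // idx % 128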
} // namespace cute
| include/cute/numeric/math.hpp/0 | {
"file_path": "include/cute/numeric/math.hpp",
"repo_id": "include",
"token_count": 3882
} | 11 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/type_traits>
#include <cuda/std/utility>
#include <cuda/std/cstddef>
#include <cuda/std/cstdint>
#include <cuda/std/limits>
#else
#include <type_traits>
#include <utility> // tuple_size, tuple_element
#include <cstddef> // ptrdiff_t
#include <cstdint> // uintptr_t
#include <limits> // numeric_limits
#endif
#include <cute/config.hpp>
namespace cute
{
using CUTE_STL_NAMESPACE::enable_if;
using CUTE_STL_NAMESPACE::enable_if_t;
}
#define __CUTE_REQUIRES(...) typename cute::enable_if<(__VA_ARGS__)>::type* = nullptr
#define __CUTE_REQUIRES_V(...) typename cute::enable_if<decltype((__VA_ARGS__))::value>::type* = nullptr
namespace cute
{
// <type_traits>
using CUTE_STL_NAMESPACE::conjunction;
using CUTE_STL_NAMESPACE::conjunction_v;
using CUTE_STL_NAMESPACE::disjunction;
using CUTE_STL_NAMESPACE::disjunction_v;
using CUTE_STL_NAMESPACE::negation;
using CUTE_STL_NAMESPACE::negation_v;
using CUTE_STL_NAMESPACE::void_t;
using CUTE_STL_NAMESPACE::is_void_v;
using CUTE_STL_NAMESPACE::is_base_of;
using CUTE_STL_NAMESPACE::is_base_of_v;
using CUTE_STL_NAMESPACE::is_const;
using CUTE_STL_NAMESPACE::is_const_v;
using CUTE_STL_NAMESPACE::is_volatile;
using CUTE_STL_NAMESPACE::is_volatile_v;
// using CUTE_STL_NAMESPACE::true_type;
// using CUTE_STL_NAMESPACE::false_type;
using CUTE_STL_NAMESPACE::conditional;
using CUTE_STL_NAMESPACE::conditional_t;
using CUTE_STL_NAMESPACE::add_const_t;
using CUTE_STL_NAMESPACE::remove_const_t;
using CUTE_STL_NAMESPACE::remove_cv_t;
using CUTE_STL_NAMESPACE::remove_reference_t;
using CUTE_STL_NAMESPACE::extent;
using CUTE_STL_NAMESPACE::remove_extent;
using CUTE_STL_NAMESPACE::decay;
using CUTE_STL_NAMESPACE::decay_t;
using CUTE_STL_NAMESPACE::is_lvalue_reference;
using CUTE_STL_NAMESPACE::is_lvalue_reference_v;
using CUTE_STL_NAMESPACE::is_reference;
using CUTE_STL_NAMESPACE::is_trivially_copyable;
using CUTE_STL_NAMESPACE::is_convertible;
using CUTE_STL_NAMESPACE::is_convertible_v;
using CUTE_STL_NAMESPACE::is_same;
using CUTE_STL_NAMESPACE::is_same_v;
using CUTE_STL_NAMESPACE::is_constructible;
using CUTE_STL_NAMESPACE::is_constructible_v;
using CUTE_STL_NAMESPACE::is_default_constructible;
using CUTE_STL_NAMESPACE::is_default_constructible_v;
using CUTE_STL_NAMESPACE::is_standard_layout;
using CUTE_STL_NAMESPACE::is_standard_layout_v;
using CUTE_STL_NAMESPACE::is_arithmetic;
using CUTE_STL_NAMESPACE::is_unsigned;
using CUTE_STL_NAMESPACE::is_unsigned_v;
using CUTE_STL_NAMESPACE::is_signed;
using CUTE_STL_NAMESPACE::is_signed_v;
using CUTE_STL_NAMESPACE::make_signed;
using CUTE_STL_NAMESPACE::make_signed_t;
// using CUTE_STL_NAMESPACE::is_integral;
template <class T>
using is_std_integral = CUTE_STL_NAMESPACE::is_integral<T>;
using CUTE_STL_NAMESPACE::is_empty;
using CUTE_STL_NAMESPACE::is_empty_v;
using CUTE_STL_NAMESPACE::invoke_result_t;
using CUTE_STL_NAMESPACE::common_type;
using CUTE_STL_NAMESPACE::common_type_t;
using CUTE_STL_NAMESPACE::remove_pointer;
using CUTE_STL_NAMESPACE::remove_pointer_t;
using CUTE_STL_NAMESPACE::alignment_of;
using CUTE_STL_NAMESPACE::alignment_of_v;
// <utility>
using CUTE_STL_NAMESPACE::declval;
template <class T>
constexpr T&& forward(remove_reference_t<T>& t) noexcept
{
return static_cast<T&&>(t);
}
template <class T>
constexpr T&& forward(remove_reference_t<T>&& t) noexcept
{
static_assert(! is_lvalue_reference_v<T>, "T cannot be an lvalue reference (e.g., U&).");
return static_cast<T&&>(t);
}
template <class T>
constexpr remove_reference_t<T>&& move(T&& t) noexcept
{
return static_cast<remove_reference_t<T>&&>(t);
}
// <limits>
using CUTE_STL_NAMESPACE::numeric_limits;
// <cstddef>
using CUTE_STL_NAMESPACE::ptrdiff_t;
// <cstdint>
using CUTE_STL_NAMESPACE::uintptr_t;
// C++20
// using std::remove_cvref;
template <class T>
struct remove_cvref {
using type = remove_cv_t<remove_reference_t<T>>;
};
// C++20
// using std::remove_cvref_t;
template <class T>
using remove_cvref_t = typename remove_cvref<T>::type;
//
// dependent_false
//
// @brief An always-false value that depends on one or more template parameters.
// See
// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1830r1.pdf
// https://github.com/cplusplus/papers/issues/572
// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2022/p2593r0.html
template <class... Args>
inline constexpr bool dependent_false = false;
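// Usage sketch: dependent_false lets a static_assert inside a discarded `if constexpr`
// branch depend on a template parameter, so it only fires when that branch is instantiated.
//
//   template <class T>
//   CUTE_HOST_DEVICE void f(T t) {
//     if constexpr (is_arithmetic<T>::value) { /* ... */ }
//     else { static_assert(dependent_false<T>, "f() requires an arithmetic type"); }
//   }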
//
// tuple_size, tuple_element
//
// @brief CuTe-local tuple-traits to prevent conflicts with other libraries.
// For cute:: types, we specialize std::tuple-traits, which is explicitly allowed.
// cute::tuple, cute::array, cute::array_subbyte, etc
// But CuTe wants to treat some external types as tuples as well. For those,
// we specialize cute::tuple-traits to avoid polluting external traits.
// dim3, uint3, etc
template <class T, class = void>
struct tuple_size;
template <class T>
struct tuple_size<T,void_t<typename CUTE_STL_NAMESPACE::tuple_size<T>::type>> : CUTE_STL_NAMESPACE::integral_constant<size_t, CUTE_STL_NAMESPACE::tuple_size<T>::value> {};
// S = : std::integral_constant<std::size_t, std::tuple_size<T>::value> {};
template <class T>
constexpr size_t tuple_size_v = tuple_size<T>::value;
template <size_t I, class T, class = void>
struct tuple_element;
template <size_t I, class T>
struct tuple_element<I,T,void_t<typename CUTE_STL_NAMESPACE::tuple_element<I,T>::type>> : CUTE_STL_NAMESPACE::tuple_element<I,T> {};
template <size_t I, class T>
using tuple_element_t = typename tuple_element<I,T>::type;
//
// is_valid
//
namespace detail {
template <class F, class... Args, class = decltype(declval<F&&>()(declval<Args&&>()...))>
CUTE_HOST_DEVICE constexpr auto
is_valid_impl(int) { return CUTE_STL_NAMESPACE::true_type{}; }
template <class F, class... Args>
CUTE_HOST_DEVICE constexpr auto
is_valid_impl(...) { return CUTE_STL_NAMESPACE::false_type{}; }
template <class F>
struct is_valid_fn {
template <class... Args>
CUTE_HOST_DEVICE constexpr auto
operator()(Args&&...) const { return is_valid_impl<F, Args&&...>(int{}); }
};
} // end namespace detail
template <class F>
CUTE_HOST_DEVICE constexpr auto
is_valid(F&&) {
return detail::is_valid_fn<F&&>{};
}
template <class F, class... Args>
CUTE_HOST_DEVICE constexpr auto
is_valid(F&&, Args&&...) {
return detail::is_valid_impl<F&&, Args&&...>(int{});
}
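// Usage sketch (hypothetical): is_valid builds a compile-time detector from a generic
// lambda whose trailing return type names the expression being probed.
//
//   auto has_size = cute::is_valid([](auto&& t) -> decltype(t.size()) {});
//   // decltype(has_size(x)) is true_type if x.size() is well-formed, false_type otherwise.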
template <bool B, template<class...> class True, template<class...> class False>
struct conditional_template {
template <class... U>
using type = True<U...>;
};
template <template<class...> class True, template<class...> class False>
struct conditional_template<false, True, False> {
template <class... U>
using type = False<U...>;
};
} // end namespace cute
| include/cute/util/type_traits.hpp/0 | {
"file_path": "include/cute/util/type_traits.hpp",
"repo_id": "include",
"token_count": 3269
} | 12 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////
#if ((__CUDACC_VER_MAJOR__ > 11) || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 8))
#define CUTLASS_ARCH_MMA_SM90_F64_MMA_SUPPORTED
#if (!defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED))
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900))
#define CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED
#endif
#endif
#endif
#if (__CUDACC_VER_MAJOR__ >= 12)
#define CUTLASS_ARCH_MMA_SM90_SUPPORTED
#if (!defined(CUTLASS_ARCH_MMA_SM90_ENABLED))
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900))
#define CUTLASS_ARCH_MMA_SM90_ENABLED
#endif
#endif
#endif
#if ((__CUDACC_VER_MAJOR__ > 12) || ((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ >= 3)))
#define CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED
#endif
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
/// Matrix Multiply-Add 16x8x4 fp64
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F64 = F64 * F64 + F64
template <>
struct Mma<
gemm::GemmShape<16,8,4>,
32,
double,
layout::RowMajor,
double,
layout::ColumnMajor,
double,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16,8,4>;
using ElementA = double;
using LayoutA = layout::RowMajor;
using FragmentA = Array<double, 2>;
using ElementB = double;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<double, 1>;
using ElementC = double;
using LayoutC = layout::RowMajor;
using FragmentC = Array<double, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm90;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED)
double const *A = reinterpret_cast<double const *>(&a);
double const *B = reinterpret_cast<double const *>(&b);
double const *C = reinterpret_cast<double const *>(&c);
double *D = reinterpret_cast<double *>(&d);
asm volatile("mma.sync.aligned.m16n8k4.row.col.f64.f64.f64.f64.rn {%0, %1, %2, %3}, {%4, %5}, {%6}, {%7, %8, %9, %10};\n"
: "=d"(D[0]), "=d"(D[1]), "=d"(D[2]), "=d"(D[3])
: "d"(A[0]), "d"(A[1]),
"d"(B[0]),
"d"(C[0]), "d"(C[1]), "d"(C[2]), "d"(C[3]));
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
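// Usage sketch (hypothetical): the specialization above is a warp-wide operation; every
// thread of the warp contributes its register fragments and holds a slice of the 16x8
// accumulator. Fragments must already follow the PTX mma register mapping.
//
//   using Mma16x8x4F64 = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<16,8,4>, 32,
//       double, cutlass::layout::RowMajor,
//       double, cutlass::layout::ColumnMajor,
//       double, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   Mma16x8x4F64 mma;
//   Mma16x8x4F64::FragmentA a;      // 2 doubles per thread
//   Mma16x8x4F64::FragmentB b;      // 1 double per thread
//   Mma16x8x4F64::FragmentC c, d;   // 4 doubles per thread
//   mma(d, a, b, c);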
////////////////////////////////////////////////////////////////////////////////
/// Matrix Multiply-Add 16x8x8 fp64
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F64 = F64 * F64 + F64
template <>
struct Mma<
gemm::GemmShape<16,8,8>,
32,
double,
layout::RowMajor,
double,
layout::ColumnMajor,
double,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16,8,8>;
using ElementA = double;
using LayoutA = layout::RowMajor;
using FragmentA = Array<double, 4>;
using ElementB = double;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<double, 2>;
using ElementC = double;
using LayoutC = layout::RowMajor;
using FragmentC = Array<double, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm90;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED)
double const *A = reinterpret_cast<double const *>(&a);
double const *B = reinterpret_cast<double const *>(&b);
double const *C = reinterpret_cast<double const *>(&c);
double *D = reinterpret_cast<double *>(&d);
asm volatile("mma.sync.aligned.m16n8k8.row.col.f64.f64.f64.f64 {%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%10, %11, %12, %13};\n"
: "=d"(D[0]), "=d"(d[1]), "=d"(d[2]), "=d"(d[3])
: "d"(A[0]), "d"(A[1]), "d"(A[2]), "d"(A[3]),
"d"(B[0]), "d"(B[1]),
"d"(C[0]), "d"(C[1]), "d"(C[2]), "d"(C[3]));
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
/// Matrix Multiply-Add 16x8x16 fp64
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F64 = F64 * F64 + F64
template <>
struct Mma<
gemm::GemmShape<16,8,16>,
32,
double,
layout::RowMajor,
double,
layout::ColumnMajor,
double,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16,8,16>;
using ElementA = double;
using LayoutA = layout::RowMajor;
using FragmentA = Array<double, 8>;
using ElementB = double;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<double, 4>;
using ElementC = double;
using LayoutC = layout::RowMajor;
using FragmentC = Array<double, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm90;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED)
double const *A = reinterpret_cast<double const *>(&a);
double const *B = reinterpret_cast<double const *>(&b);
double const *C = reinterpret_cast<double const *>(&c);
double *D = reinterpret_cast<double *>(&d);
asm volatile("mma.sync.aligned.m16n8k16.row.col.f64.f64.f64.f64 {%0, %1, %2, %3}, {%4, %5, %6, %7, %8, %9, %10, %11}, {%12, %13, %14, %15}, {%16, %17, %18, %19};\n"
: "=d"(D[0]), "=d"(D[1]), "=d"(D[2]), "=d"(D[3])
: "d"(A[0]), "d"(A[2]), "d"(A[2]), "d"(A[3]), "d"(A[4]), "d"(A[5]), "d"(A[6]), "d"(A[7]),
"d"(B[0]), "d"(B[1]), "d"(B[2]), "d"(B[3]),
"d"(C[0]), "d"(C[1]), "d"(C[2]), "d"(C[3]));
#else
    CUTLASS_UNUSED(d);
    CUTLASS_UNUSED(a);
    CUTLASS_UNUSED(b);
    CUTLASS_UNUSED(c);
    CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/arch/mma_sm90.h/0 | {
"file_path": "include/cutlass/arch/mma_sm90.h",
"repo_id": "include",
"token_count": 3148
} | 13 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Basic include for CUTLASS BLAS3/HPC code.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/blas3_types.h"
#include "cutlass/coord.h"
#include "cutlass/complex.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines FillMode inversions
template <FillMode kFillMode>
struct InvertFillMode;
/// Invert FillMode lower to upper
template <>
struct InvertFillMode<FillMode::kLower> {
static FillMode const mode = FillMode::kUpper;
};
/// Invert FillMode upper to lower
template <>
struct InvertFillMode<FillMode::kUpper> {
static FillMode const mode = FillMode::kLower;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines SideMode inversions
template <SideMode kSideMode>
struct InvertSideMode;
/// Invert SideMode left to right
template <>
struct InvertSideMode<SideMode::kLeft> {
static SideMode const mode = SideMode::kRight;
};
/// Invert SideMode right to left
template <>
struct InvertSideMode<SideMode::kRight> {
static SideMode const mode = SideMode::kLeft;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines correct compare operation for Triangular matrix boundary
template <FillMode kFillMode, DiagType kDiagType = DiagType::kNonUnit>
struct TrMatrixCompareOp {
using Index = int32_t;
using Type = typename platform::conditional<
(kFillMode == FillMode::kLower),
greater_equal<Index>,
less_equal<Index>>::type;
};
template <FillMode kFillMode>
struct TrMatrixCompareOp <kFillMode, DiagType::kUnit> {
using Index = int32_t;
using Type = typename platform::conditional<
(kFillMode == FillMode::kLower),
greater_equal<Index>,
less_equal<Index>>::type;
};
template <FillMode kFillMode>
struct TrMatrixCompareOp <kFillMode, DiagType::kZero> {
using Index = int32_t;
using Type = typename platform::conditional<
(kFillMode == FillMode::kLower),
greater<Index>,
less<Index>>::type;
};
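// Usage sketch (hypothetical): the selected functor answers "does element (row, col)
// belong to the referenced triangle?" for a given fill mode and diagonal type.
//
//   using Compare = typename cutlass::TrMatrixCompareOp<cutlass::FillMode::kLower>::Type;
//   Compare cmp;
//   bool in_triangle = cmp(row, col);   // greater_equal<int32_t> for kLower, i.e. row >= col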
////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns precision in terms of bits (based on datatype) to fill tensors with.
// Defaults to 5 bits of mantissa for TF32 and FP32 (with implicit round-offs).
// Also defines acceptable mantissa result variance/error.
template <typename Element>
struct MantissaInBits {
static int constexpr bits = 5;
static double constexpr error = 1.0e-7;
};
// Full precision is supported for FP64
template <>
struct MantissaInBits<double> {
static int constexpr bits = 30;
static double constexpr error = 1.0e-15;
};
template <>
struct MantissaInBits<cutlass::complex<double>> {
static int constexpr bits = 30;
static double constexpr error = 1.0e-15;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/blas3.h/0 | {
"file_path": "include/cutlass/blas3.h",
"repo_id": "include",
"token_count": 1540
} | 14 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
// common
#include "cutlass/arch/mma.h"
#include "cutlass/cutlass.h"
#include "cutlass/arch/mma.h"
#include "cutlass/trace.h"
#include "cutlass/cluster_launch.hpp"
#include "cutlass/device_kernel.h"
#include "cutlass/conv/kernel/conv_universal.hpp"
#include "cutlass/gemm/gemm.h"
#include "cutlass/detail/layout.hpp"
#include "cutlass/cuda_host_adapter.hpp"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass::conv::device {
////////////////////////////////////////////////////////////////////////////////
/*!
ConvUniversalAdapter is a stateful, reusable handle built around a kernel
of type cutlass::conv::kernel::ConvUniversal.
It manages the lifetime of the underlying `kernel::Params` struct, and exposes APIs
to create it from the host facing arguments. For power users, static methods
are exposed that bypass the stateful methods or args->params lowering.
*/
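//
// Usage sketch (hypothetical; the Arguments fields depend on the concrete ConvKernel, and
// CUTLASS_CHECK is the example-side error macro, not part of the library):
//
//   using Conv = cutlass::conv::device::ConvUniversalAdapter<SomeConvKernel>;  // assumed kernel type
//
//   Conv conv_op;
//   Conv::Arguments args = /* user-provided problem shape, pointers, epilogue params */;
//
//   CUTLASS_CHECK(Conv::can_implement(args));
//   size_t workspace_bytes = Conv::get_workspace_size(args);
//   cutlass::device_memory::allocation<uint8_t> workspace(workspace_bytes);
//
//   CUTLASS_CHECK(conv_op.initialize(args, workspace.get(), stream));
//   CUTLASS_CHECK(conv_op.run(stream));
//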
template <class ConvKernel_>
class ConvUniversalAdapter
{
public:
using ConvKernel = ConvKernel_;
using TileShape = typename ConvKernel::TileShape;
using ElementA = typename ConvKernel::ElementA;
using ElementB = typename ConvKernel::ElementB;
using ElementC = typename ConvKernel::ElementC;
using ElementD = typename ConvKernel::ElementD;
using ElementAccumulator = typename ConvKernel::TiledMma::ValTypeC;
using DispatchPolicy = typename ConvKernel::DispatchPolicy;
using CollectiveMainloop = typename ConvKernel::CollectiveMainloop;
using CollectiveEpilogue = typename ConvKernel::CollectiveEpilogue;
static bool const kEnableCudaHostAdapter = CUTLASS_ENABLE_CUDA_HOST_ADAPTER;
// Tease out meta-information about the conv algorithm
static constexpr conv::Operator kConvolutionalOperator = DispatchPolicy::ConvOp;
static constexpr int NumSpatialDimensions = ConvKernel::NumSpatialDimensions;
// If our TiledMMA's instruction thread layout size is larger than 1, we know its a tensorop!
using OperatorClass = cute::conditional_t<
(cute::size(typename ConvKernel::TiledMma::AtomThrID{}) > 1),
cutlass::arch::OpClassTensorOp, cutlass::arch::OpClassSimt>;
using ArchTag = typename ConvKernel::ArchTag;
// Assume TiledMma's ShapeMNK is the same as 2.x's ThreadblockShape
using ThreadblockShape = cutlass::gemm::GemmShape<
cute::size<0>(TileShape{}),
cute::size<1>(TileShape{}),
cute::size<2>(TileShape{})>;
using ClusterShape = cutlass::gemm::GemmShape<
cute::size<0>(typename ConvKernel::DispatchPolicy::ClusterShape{}),
cute::size<1>(typename ConvKernel::DispatchPolicy::ClusterShape{}),
cute::size<2>(typename ConvKernel::DispatchPolicy::ClusterShape{})>;
// Instruction shape is easy too, since we get that directly from our TiledMma's atom shape
using InstructionShape = cutlass::gemm::GemmShape<
cute::size<0>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{}),
cute::size<1>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{}),
cute::size<2>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{})>;
// Legacy: provide a correct warp count, but no reliable warp shape
static int const kThreadCount = ConvKernel::MaxThreadsPerBlock;
// Warp shape is not a primary API type in 3.x
// But we can best approximate it by inspecting the TiledMma
// For this, we make the assumption that we always have 4 warps along M, and rest along N, none along K
// We also always round up the warp count to 4 if the tiled mma is smaller than 128 threads
static constexpr int WarpsInMma = cute::max(4, CUTE_STATIC_V(cute::size(typename ConvKernel::TiledMma{})) / 32);
static constexpr int WarpsInMmaM = 4;
static constexpr int WarpsInMmaN = cute::ceil_div(WarpsInMma, WarpsInMmaM);
using WarpCount = cutlass::gemm::GemmShape<WarpsInMmaM, WarpsInMmaN, 1>;
using WarpShape = cutlass::gemm::GemmShape<
CUTE_STATIC_V(cute::tile_size<0>(typename CollectiveMainloop::TiledMma{})) / WarpsInMmaM,
CUTE_STATIC_V(cute::tile_size<1>(typename CollectiveMainloop::TiledMma{})) / WarpsInMmaN,
CUTE_STATIC_V(cute::tile_size<2>(typename CollectiveMainloop::TiledMma{}))>;
static int constexpr kStages = CollectiveMainloop::DispatchPolicy::Stages;
// Inspect TiledCopy for A and B to compute the alignment size
static int constexpr kAlignmentA = detail::get_alignment_count_from_gmem_tiled_copy<
typename CollectiveMainloop::GmemTiledCopyA, ElementA>();
static int constexpr kAlignmentB = detail::get_alignment_count_from_gmem_tiled_copy<
typename CollectiveMainloop::GmemTiledCopyB, ElementB>();
static int constexpr kAlignmentC = detail::get_alignment_count_from_gmem_tiled_copy<
typename CollectiveEpilogue::GmemTiledCopyC, ElementC>();
static int constexpr kAlignmentD = detail::get_alignment_count_from_gmem_tiled_copy<
typename CollectiveEpilogue::GmemTiledCopyD, ElementD>();
using EpilogueOutputOp = typename CollectiveEpilogue::ThreadEpilogueOp;
/// Argument structure: User API
using Arguments = typename ConvKernel::Arguments;
/// Argument structure: Kernel API
using Params = typename ConvKernel::Params;
private:
/// Kernel API parameters object
Params params_;
public:
/// Access the Params structure
Params const& params() const {
return params_;
}
/// Determines whether the conv can execute the given problem.
static Status
can_implement(Arguments const& args) {
if (ConvKernel::can_implement(args)) {
return Status::kSuccess;
}
else {
return Status::kInvalid;
}
}
/// Gets the workspace size
static size_t
get_workspace_size(Arguments const& args) {
size_t workspace_bytes = 0;
CUTLASS_TRACE_HOST(" workspace_bytes: " << workspace_bytes);
workspace_bytes += ConvKernel::get_workspace_size(args);
return workspace_bytes;
}
/// Computes the grid shape
static dim3
get_grid_shape(Arguments const& args, void* workspace = nullptr) {
auto tmp_params = ConvKernel::to_underlying_arguments(args, workspace);
return ConvKernel::get_grid_shape(tmp_params);
}
/// Computes the grid shape
static dim3
get_grid_shape(Params const& params) {
return ConvKernel::get_grid_shape(params);
}
/// Computes the maximum number of active blocks per multiprocessor
static int maximum_active_blocks(int /* smem_capacity */ = -1) {
CUTLASS_TRACE_HOST("ConvUniversal::maximum_active_blocks()");
int max_active_blocks = -1;
int smem_size = ConvKernel::SharedStorageSize;
// first, account for dynamic smem capacity if needed
cudaError_t result;
if (smem_size >= (48 << 10)) {
CUTLASS_TRACE_HOST(" Setting smem size to " << smem_size);
result = cudaFuncSetAttribute(
device_kernel<ConvKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (cudaSuccess != result) {
result = cudaGetLastError(); // to clear the error bit
CUTLASS_TRACE_HOST(
" cudaFuncSetAttribute() returned error: "
<< cudaGetErrorString(result));
return -1;
}
}
// query occupancy after setting smem size
result = cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks,
device_kernel<ConvKernel>,
ConvKernel::MaxThreadsPerBlock,
smem_size);
if (cudaSuccess != result) {
result = cudaGetLastError(); // to clear the error bit
CUTLASS_TRACE_HOST(
" cudaOccupancyMaxActiveBlocksPerMultiprocessor() returned error: "
<< cudaGetErrorString(result));
return -1;
}
CUTLASS_TRACE_HOST(" max_active_blocks: " << max_active_blocks);
return max_active_blocks;
}
/// Initializes conv state from arguments.
Status
initialize(
Arguments const& args,
void* workspace = nullptr,
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr) {
CUTLASS_TRACE_HOST("ConvUniversal::initialize() - workspace "
<< workspace << ", stream: " << (stream ? "non-null" : "null"));
// Initialize the workspace
Status status = ConvKernel::initialize_workspace(args, workspace, stream, cuda_adapter);
if (status != Status::kSuccess) {
return status;
}
// Initialize the Params structure
params_ = ConvKernel::to_underlying_arguments(args, workspace);
// Don't set the function attributes - require the CudaHostAdapter to set it.
if constexpr (kEnableCudaHostAdapter) {
CUTLASS_ASSERT(cuda_adapter);
return Status::kSuccess;
}
else {
// account for dynamic smem capacity if needed
int smem_size = ConvKernel::SharedStorageSize;
if (smem_size >= (48 << 10)) {
CUTLASS_TRACE_HOST(" Setting smem size to " << smem_size);
cudaError_t result = cudaFuncSetAttribute(
device_kernel<ConvKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (cudaSuccess != result) {
result = cudaGetLastError(); // to clear the error bit
CUTLASS_TRACE_HOST(" cudaFuncSetAttribute() returned error: " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
}
}
return Status::kSuccess;
}
/// Update API is preserved in 3.0, but does not guarantee a lightweight update of params.
Status
update(Arguments const& args, void* workspace = nullptr) {
CUTLASS_TRACE_HOST("ConvUniversal()::update() - workspace: " << workspace);
size_t workspace_bytes = get_workspace_size(args);
if (workspace_bytes > 0 && nullptr == workspace) {
return Status::kErrorWorkspaceNull;
}
params_ = ConvKernel::to_underlying_arguments(args, workspace);
return Status::kSuccess;
}
/// Primary run() entry point API that is static allowing users to create and manage their own params.
  /// Supplied params struct must be constructed by calling ConvKernel::to_underlying_arguments()
static Status
run(Params& params, cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr, int32_t kernel_index = 0) {
CUTLASS_TRACE_HOST("ConvUniversal::run()");
dim3 const block = ConvKernel::get_block_shape();
dim3 const grid = get_grid_shape(params);
// configure smem size and carveout
int smem_size = ConvKernel::SharedStorageSize;
Status launch_result;
// Use extended launch API only for mainloops that use it
if constexpr (ConvKernel::ArchTag::kMinComputeCapability >= 90) {
constexpr bool is_static_1x1x1 = cute::is_static_v<typename ConvKernel::DispatchPolicy::ClusterShape> and
cute::size(typename ConvKernel::DispatchPolicy::ClusterShape{}) == 1;
dim3 cluster(cute::size<0>(typename ConvKernel::DispatchPolicy::ClusterShape{}),
cute::size<1>(typename ConvKernel::DispatchPolicy::ClusterShape{}),
cute::size<2>(typename ConvKernel::DispatchPolicy::ClusterShape{}));
      void* kernel_params[] = {&params};
if constexpr (kEnableCudaHostAdapter) {
//
// Use the cuda host adapter
//
CUTLASS_ASSERT(cuda_adapter);
if (cuda_adapter) {
launch_result = cuda_adapter->launch(grid,
cluster,
block,
smem_size,
stream,
kernel_params,
kernel_index);
}
else {
return Status::kErrorInternal;
}
}
else {
CUTLASS_ASSERT(cuda_adapter == nullptr);
void const* kernel = (void const*) device_kernel<ConvKernel>;
if constexpr (ConvKernel::ArchTag::kMinComputeCapability == 90) {
if constexpr (is_static_1x1x1) {
device_kernel<ConvKernel><<<grid, block, smem_size, stream>>>(params);
launch_result = Status::kSuccess;
}
else {
launch_result = ClusterLauncher::launch(
grid, cluster, block, smem_size, stream, kernel, kernel_params);
}
}
}
}
else {
launch_result = Status::kSuccess;
if constexpr (kEnableCudaHostAdapter) {
CUTLASS_ASSERT(cuda_adapter);
if (cuda_adapter) {
        void* kernel_params[] = {&params};
launch_result = cuda_adapter->launch(
grid, block, smem_size, stream, kernel_params, 0
);
}
else {
return Status::kErrorInternal;
}
}
else {
CUTLASS_ASSERT(cuda_adapter == nullptr);
device_kernel<ConvKernel><<<grid, block, smem_size, stream>>>(params);
}
}
cudaError_t result = cudaGetLastError();
if (cudaSuccess == result && Status::kSuccess == launch_result) {
return Status::kSuccess;
}
else {
CUTLASS_TRACE_HOST(" Kernel launch failed. Reason: " << result);
return Status::kErrorInternal;
}
}
//
// Non-static launch overloads that first create and set the internal params struct of this kernel handle.
//
/// Launches the kernel after first constructing Params internal state from supplied arguments.
Status
run(
Arguments const& args,
void* workspace = nullptr,
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr,
int32_t kernel_index = 0
) {
Status status = initialize(args, workspace, stream, cuda_adapter);
if (Status::kSuccess == status) {
status = run(params_, stream, cuda_adapter, kernel_index);
}
return status;
}
/// Launches the kernel after first constructing Params internal state from supplied arguments.
Status
operator()(
Arguments const& args,
void* workspace = nullptr,
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr) {
return run(args, workspace, stream, cuda_adapter);
}
/// Overload that allows a user to re-launch the same kernel without updating internal params struct.
Status
run(cudaStream_t stream = nullptr) {
return run(params_, stream);
}
/// Overload that allows a user to re-launch the same kernel without updating internal params struct.
Status
operator()(cudaStream_t stream = nullptr) {
return run(params_, stream);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::conv::device
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/device/conv_universal_adapter.hpp/0 | {
"file_path": "include/cutlass/conv/device/conv_universal_adapter.hpp",
"repo_id": "include",
"token_count": 5985
} | 15 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/kernel_hardware_info.hpp"
#include "cute/tensor.hpp"
#include "cute/arch/cluster_sm90.hpp"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/dispatch_policy.hpp"
#include "cutlass/pipeline/sm90_pipeline.hpp"
#include "cutlass/gemm/kernel/tile_scheduler.hpp"
///////////////////////////////////////////////////////////////////////////////
namespace cutlass::conv::kernel {
///////////////////////////////////////////////////////////////////////////////
template <
class CollectiveMainloop_,
class CollectiveEpilogue_,
class TileSchedulerTag_
>
class ConvUniversal<
CollectiveMainloop_,
CollectiveEpilogue_,
TileSchedulerTag_,
cute::enable_if_t<cute::is_base_of_v<cutlass::conv::KernelImplicitTmaWarpSpecializedSm90,
typename CollectiveMainloop_::DispatchPolicy::Schedule>>>
{
public:
//
// Type Aliases
//
// Mainloop derived types
using CollectiveMainloop = CollectiveMainloop_;
using TileShape = typename CollectiveMainloop::TileShape;
using TiledMma = typename CollectiveMainloop::TiledMma;
using ArchTag = typename CollectiveMainloop::ArchTag;
using ElementA = typename CollectiveMainloop::ElementA;
using StrideA = typename CollectiveMainloop::StrideA;
using ElementB = typename CollectiveMainloop::ElementB;
using StrideB = typename CollectiveMainloop::StrideB;
using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy;
using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator;
using ClusterShape = typename DispatchPolicy::ClusterShape;
using MainloopArguments = typename CollectiveMainloop::Arguments;
using MainloopParams = typename CollectiveMainloop::Params;
static constexpr int NumSpatialDimensions = CollectiveMainloop::NumSpatialDimensions;
static_assert(ArchTag::kMinComputeCapability >= 90);
// Epilogue derived types
using CollectiveEpilogue = CollectiveEpilogue_;
using ElementC = typename CollectiveEpilogue::ElementC;
using StrideC = typename CollectiveEpilogue::StrideC;
using ElementD = typename CollectiveEpilogue::ElementD;
using StrideD = typename CollectiveEpilogue::StrideD;
using EpilogueArguments = typename CollectiveEpilogue::Arguments;
using EpilogueParams = typename CollectiveEpilogue::Params;
using TileSchedulerTag = TileSchedulerTag_;
static_assert(cute::is_void_v<TileSchedulerTag>,
"TMA warp-specialized kernel does not support specializing the tile scheduler.");
using TileScheduler = typename cutlass::gemm::kernel::detail::TileSchedulerSelector<
TileSchedulerTag, ArchTag, TileShape, ClusterShape>::Scheduler;
using TileSchedulerArguments = typename TileScheduler::Arguments;
// Kernel level shared memory storage
struct SharedStorage {
union TensorStorage {
using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage;
using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage;
MainloopTensorStorage mainloop;
EpilogueTensorStorage epilogue;
} tensors;
struct PipelineStorage : cute::aligned_struct<16> {
using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage;
using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage;
alignas(16) MainloopPipelineStorage mainloop;
alignas(16) EpiLoadPipelineStorage epi_load;
} pipelines;
};
static constexpr int SharedStorageSize = sizeof(SharedStorage);
static constexpr uint32_t NumLoadWarpGroups = 1;
static constexpr uint32_t NumMmaWarpGroups = 1;
static constexpr uint32_t MaxThreadsPerBlock = CUTE_STATIC_V(size(TiledMma{})) + (NumLoadWarpGroups * NumThreadsPerWarpGroup);
static constexpr uint32_t MinBlocksPerMultiprocessor = 1;
  // Host facing arguments
struct Arguments {
MainloopArguments mainloop{};
EpilogueArguments epilogue{};
KernelHardwareInfo hw_info{};
TileSchedulerArguments scheduler{};
};
// Kernel device entry point API
struct Params {
MainloopParams mainloop;
EpilogueParams epilogue;
};
//
// Methods
//
// Map user facing arguments to device facing params
static Params
to_underlying_arguments(Arguments const& args, void* workspace) {
(void) workspace;
auto mainloop_params = CollectiveMainloop::to_underlying_arguments(args.mainloop, workspace);
auto problem_shape_MNKL = args.mainloop.problem_shape.get_transformed_problem_shape_MNKL();
return {
mainloop_params,
CollectiveEpilogue::to_underlying_arguments(problem_shape_MNKL, args.epilogue, workspace)
};
}
  // Given arguments, returns true if the kernel can successfully compute upon them. False otherwise.
static bool
can_implement(Arguments const& args) {
bool implementable = true;
implementable &= CollectiveMainloop::can_implement(args.mainloop.problem_shape, args.mainloop);
implementable &= CollectiveEpilogue::can_implement(args.mainloop.problem_shape.get_transformed_problem_shape_MNKL(), args.epilogue);
return implementable;
}
static size_t
get_workspace_size(Arguments const& args) {
return 0;
}
static cutlass::Status
initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr,
CudaHostAdapter* cuda_adapter = nullptr) {
return Status::kSuccess;
}
// Computes the kernel launch grid shape based on runtime parameters
static dim3
get_grid_shape(Params const& params) {
return cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90::get_tiled_cta_shape_mnl(
params.mainloop.problem_shape, TileShape{}, ClusterShape{});
}
static dim3
get_block_shape() {
return dim3(MaxThreadsPerBlock, 1, 1);
}
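  // Note (exposition): with the single load warp group and single MMA warp group
  // declared above, the launched block contains size(TiledMma{}) MMA threads plus
  // one producer warp group; e.g. a 128-thread TiledMma yields a 256-thread block
  // (numbers illustrative).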
CUTLASS_DEVICE
void
operator()(Params const& params, char* smem_buf) {
using namespace cute;
using X = Underscore;
// Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a.
#if ! defined(__CUDA_ARCH_FEAT_SM90_ALL)
if constexpr(size<0>(typename TiledMma::AtomShape_MNK{}) == 64) {
printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n");
return;
}
#endif
enum class WarpGroupRole {
Producer = 0,
Consumer = 1,
};
enum class ProducerWarpRole {
MainloopEpilogue = 0,
Warp1 = 1,
Warp2 = 2,
Warp3 = 3
};
// Kernel level shared memory storage
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(smem_buf);
int thread_idx = int(threadIdx.x);
int lane_idx = canonical_lane_idx();
int warp_idx = canonical_warp_idx_sync();
int warp_idx_in_warp_group = warp_idx % NumWarpsPerWarpGroup;
int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup;
auto warp_group_role = WarpGroupRole(canonical_warp_group_idx());
auto producer_warp_role = ProducerWarpRole(warp_idx_in_warp_group);
int lane_predicate = cute::elect_one_sync();
uint32_t block_rank_in_cluster = cute::block_rank_in_cluster();
// Issue Tma Descriptor Prefetch from a single thread
if ((warp_idx == 0) && lane_predicate) {
CollectiveMainloop::prefetch_tma_descriptors(params.mainloop);
CollectiveEpilogue::prefetch_tma_descriptors(params.epilogue);
}
// Mainloop Load pipeline
using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline;
typename MainloopPipeline::Params mainloop_pipeline_params;
if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::MainloopEpilogue) {
mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer;
}
if (warp_group_role == WarpGroupRole::Consumer) {
mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer;
}
mainloop_pipeline_params.is_leader = warp_group_thread_idx == 0;
mainloop_pipeline_params.num_consumers = NumThreadsPerWarpGroup;
mainloop_pipeline_params.transaction_bytes = params.mainloop.tma_transaction_bytes;
MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params, ClusterShape{});
// Epilogue Load pipeline
using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline;
typename EpiLoadPipeline::Params epi_load_pipeline_params;
if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::MainloopEpilogue) {
epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer;
}
if (warp_group_role == WarpGroupRole::Consumer) {
epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer;
}
epi_load_pipeline_params.dst_blockid = cute::block_rank_in_cluster();
epi_load_pipeline_params.producer_arv_count = NumThreadsPerWarp;
epi_load_pipeline_params.consumer_arv_count = NumThreadsPerWarpGroup;
if constexpr (CollectiveEpilogue::RequiresTransactionBytes) {
epi_load_pipeline_params.transaction_bytes = params.epilogue.tma_transaction_bytes;
}
EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params);
// Epilogue Store pipeline
using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline;
typename EpiStorePipeline::Params epi_store_pipeline_params;
epi_store_pipeline_params.always_wait = true;
EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params);
// Initialize starting pipeline states for the collectives
// Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding)
typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state;
typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state;
// For the DMA Load (producer) we start with an opposite phase
// i.e., we skip all waits since we know that the buffer is indeed empty
PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state<MainloopPipeline>();
PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state<EpiLoadPipeline>();
PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state<EpiStorePipeline>();
auto cluster_wait_fn = [&] () {
// We need this to guarantee that the Pipeline init is visible
      // to all producer and consumer thread blocks in the Cluster
if constexpr (size(ClusterShape{}) > 1) {
cute::cluster_arrive_relaxed();
return [] () { cute::cluster_wait(); };
}
else {
__syncthreads();
return [] () {}; // do nothing
}
} ();
// Separate out problem shape for convenience
auto problem_shape_MNKL = append<4>(params.mainloop.problem_shape, _1{});
auto [M, N, K, L] = problem_shape_MNKL;
// TMA requires special handling of strides to deal with coord codomain mapping
// Represent the full tensors -- get these from TMA
Tensor mA_mk = params.mainloop.tma_load_a.get_tma_tensor(make_shape(M, K));
Tensor mB_nk = params.mainloop.tma_load_b.get_tma_tensor(make_shape(N, K));
// Get the appropriate blocks for this thread block -- potential for thread block locality
auto cta_tile_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K)
TiledMma tiled_mma;
// Make tiled views, defer the slice
Tensor gA_mk = local_tile(mA_mk, cta_tile_shape, make_coord(_,_,_), Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k)
Tensor gB_nk = local_tile(mB_nk, cta_tile_shape, make_coord(_,_,_), Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k)
// Compute m_coord, n_coord, and l_coord with their post-tiled shapes
auto m_coord = idx2crd(int(blockIdx.x), shape<2>(gA_mk));
auto n_coord = idx2crd(int(blockIdx.y), shape<2>(gB_nk), compact_col_major(shape<2>(gB_nk)));
// The output shape M is linearized so the output coord M here should also be linearized.
auto output_tile_coord = make_coord(int(blockIdx.x), n_coord, _, Int<0>{});
// Slice with m_coord and n_coord
Tensor gA = gA_mk(_,_,m_coord,_); // (BLK_M,BLK_K,k)
Tensor gB = gB_nk(_,_,n_coord,_); // (BLK_N,BLK_K,k)
// Get pipeline iterators and increments from tensor shapes
auto k_tile_iter = cute::make_coord_iterator(shape<2>(gA));
auto k_tile_count = size<2>(gA);
// In a warp specialized kernel, collectives expose data movement and compute operations separately
CollectiveMainloop collective_mainloop;
CollectiveEpilogue collective_epilogue{params.epilogue, shared_storage.tensors.epilogue};
// Wait for all thread blocks in Cluster
cluster_wait_fn();
if (warp_group_role == WarpGroupRole::Producer) {
if (producer_warp_role == ProducerWarpRole::MainloopEpilogue) {
collective_mainloop.load(
mainloop_pipeline,
mainloop_pipe_producer_state,
gA, params.mainloop.tma_load_a,
gB, params.mainloop.tma_load_b,
k_tile_iter, k_tile_count,
lane_idx,
block_rank_in_cluster,
shared_storage.tensors.mainloop
);
// Update starting mainloop pipeline state for the pipeline drain
mainloop_pipe_producer_state.advance(k_tile_count);
// Make sure mainloop consumer has been waited upon before issuing epilogue load
collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state);
if (collective_epilogue.is_producer_load_needed()) {
epi_load_pipe_producer_state = collective_epilogue.load(
epi_load_pipeline,
epi_load_pipe_producer_state,
problem_shape_MNKL,
cta_tile_shape,
output_tile_coord,
tiled_mma,
lane_idx,
shared_storage.tensors.epilogue
);
collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state);
}
}
}
else if (warp_group_role == WarpGroupRole::Consumer) {
Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(cta_tile_shape)); // (MMA,MMA_M,MMA_N)
collective_mainloop.mma(
mainloop_pipeline,
mainloop_pipe_consumer_state,
accumulators,
k_tile_count,
thread_idx,
shared_storage.tensors.mainloop,
params.mainloop
);
// Make sure the math instructions are done and free buffers before entering the epilogue
collective_mainloop.mma_tail(
mainloop_pipeline,
mainloop_pipe_consumer_state,
k_tile_count
);
// Epilogue and write to gD
auto [epi_load_pipe_consumer_state_next, epi_store_pipe_producer_state_next] =
collective_epilogue.store(
epi_load_pipeline,
epi_load_pipe_consumer_state,
epi_store_pipeline,
epi_store_pipe_producer_state,
problem_shape_MNKL,
cta_tile_shape,
output_tile_coord,
accumulators,
tiled_mma,
warp_group_thread_idx,
shared_storage.tensors.epilogue
);
collective_epilogue.store_tail(
epi_load_pipeline,
epi_load_pipe_consumer_state_next,
epi_store_pipeline,
epi_store_pipe_producer_state_next
);
}
}
};
///////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::conv::kernel
| include/cutlass/conv/kernel/sm90_implicit_gemm_tma_warpspecialized.hpp/0 | {
"file_path": "include/cutlass/conv/kernel/sm90_implicit_gemm_tma_warpspecialized.hpp",
"repo_id": "include",
"token_count": 6502
} | 16 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (activation tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dWgradActivationTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static_assert(sizeof_bits<Element>::value >= 8,
"WGRAD requires elements of size 8b or greater.");
//
// Parameters structure
//
using Params = Conv2dAnalyticParams<Layout>;
private:
  Params const &params_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
  // Filter position (r,s,c) in the contiguous dimension stays constant for each gemm_iteration_k
int filter_r_[ThreadMap::Iterations::kContiguous];
int filter_s_[ThreadMap::Iterations::kContiguous];
int filter_c_[ThreadMap::Iterations::kContiguous];
int offset_npq_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv2dWgradActivationTileAccessIteratorAnalytic(
    Params const &params,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr))
{
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
// initialize r,s,c filter position for every contiguous iteration
CUTLASS_PRAGMA_UNROLL
for(int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int rsc_offset = threadblock_offset.column() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
filter_r_[c] = rsc_offset / (problem_size_.S * problem_size_.C);
int residual = rsc_offset % (problem_size_.S * problem_size_.C);
filter_s_[c] = residual / problem_size_.C;
filter_c_[c] = residual % problem_size_.C;
}
// initialize n, p, q offset for every strided iteration
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_npq_[s] = threadblock_offset.row() + thread_coord.strided()
+ s * ThreadMap::Delta::kStrided;
}
}
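  // Illustrative decomposition (numbers assumed, not from the source): with
  // R = S = 3 and C = 64, a contiguous offset rsc_offset = 200 splits as
  //   filter_r_ = 200 / (3 * 64) = 1,   residual = 200 % 192 = 8,
  //   filter_s_ = 8 / 64 = 0,           filter_c_ = 8 % 64 = 8,
  // i.e. filter position (r, s, c) = (1, 0, 8); offset_npq_ is decomposed the
  // same way into (n, p, q) inside at().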
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next GEMM-K offset (offset_npq_) in GEMM-B by a CTA-K tile
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_npq_[s] += Shape::kRow * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the activation tensor x that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int r, s, c;
if (kAccessesPerVector == 1) {
/// One 128b aligned access fetching more than one element
c = filter_c_[iteration_contiguous_];
r = filter_r_[iteration_contiguous_];
s = filter_s_[iteration_contiguous_];
}
else {
      /// Multiple accesses to support non-128b alignment in the contiguous dimension
c = (filter_c_[iteration_contiguous_] + iteration_vector_ * AccessType::kElements) % problem_size_.C;
int wrap_c = (filter_c_[iteration_contiguous_] + iteration_vector_ * AccessType::kElements) / problem_size_.C;
s = (filter_s_[iteration_contiguous_] + wrap_c) % problem_size_.S;
int wrap_s = (filter_s_[iteration_contiguous_] + wrap_c) / problem_size_.S;
r = filter_r_[iteration_contiguous_] + wrap_s;
}
if (problem_size_.mode == Mode::kConvolution) {
r = (problem_size_.R - 1 - r);
s = (problem_size_.S - 1 - s);
}
int n = offset_npq_[iteration_strided_] / (problem_size_.P * problem_size_.Q);
int residual = offset_npq_[iteration_strided_] % (problem_size_.P * problem_size_.Q);
int p = residual / problem_size_.Q;
int q = residual % problem_size_.Q;
int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h;
int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w;
return TensorCoord(n, h, w, c);
}
/// Returns true if the current coordinate is within the activation tensor x
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N &&
coord.h() >= 0 && coord.h() < problem_size_.H &&
coord.w() >= 0 && coord.w() < problem_size_.W;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dWgradActivationTileAccessIteratorAnalytic &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
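    // (for illustration: with a 16-element AccessType, C must be a multiple of 16)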
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv2d_wgrad_activation_tile_access_iterator_analytic.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv2d_wgrad_activation_tile_access_iterator_analytic.h",
"repo_id": "include",
"token_count": 3446
} | 17 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dWgradOutputGradientTileAccessIteratorOptimized {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
static_assert(sizeof_bits<Element>::value >= 8,
"WGRAD requires elements of size 8b or greater.");
//
// Parameters structure
//
struct Params : Conv3dWgradOutputGradientIteratorOptimizedParams {
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() {}
CUTLASS_HOST_DEVICE
Params(Conv3dWgradOutputGradientIteratorOptimizedParams const &base)
: Conv3dWgradOutputGradientIteratorOptimizedParams(base) {}
CUTLASS_HOST_DEVICE
Params(Conv3dProblemSize const &problem_size, Layout const &layout)
: Conv3dWgradOutputGradientIteratorOptimizedParams(
problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}) {}
};
private:
  Params const &params_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
uint32_t predicates_;
int filter_k_;
int offset_nzpq_;
public:
CUTLASS_HOST_DEVICE
Conv3dWgradOutputGradientTileAccessIteratorOptimized(
    Params const &params,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
predicates_(0),
filter_k_(0),
offset_nzpq_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.row() + thread_coord.contiguous();
offset_nzpq_ = threadblock_offset.column() + thread_coord.strided();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int filter_k = filter_k_ + c * ThreadMap::Delta::kContiguous;
int offset_nzpq = offset_nzpq_ + s * ThreadMap::Delta::kStrided;
bool predicate = valid_(at_(offset_nzpq, filter_k));
uint32_t pred = (predicate ? 1u : 0);
int pred_idx = c + s * ThreadMap::Iterations::kContiguous;
predicates_ |= (pred << pred_idx);
}
}
// Offset pointer to (iteration_strided_, iteration_contiguous_) = (0, 0)
pointer_ += (
offset_nzpq_ * params.layout.stride()[0] + filter_k_
) * sizeof_bits<Element>::value / 8;
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
    // moves to the next GEMM-K offset (offset_nzpq_) in GEMM-A by a CTA-K tile
offset_nzpq_ += Shape::kColumn * problem_size_.split_k_slices;
// Clear predicates if needed
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
if (offset_nzpq_ + s * ThreadMap::Delta::kStrided >= params_.NZPQ) {
uint32_t kClearMask = ((1u << ThreadMap::Iterations::kContiguous) - 1) << (s * ThreadMap::Iterations::kContiguous);
predicates_ = (predicates_ & (~kClearMask));
}
}
pointer_ += params_.inc_next_nzpq;
}
private:
  /// Returns the coordinate in the output gradient tensor Dy that the (offset_nzpq, k)
  /// position points to.
CUTLASS_HOST_DEVICE
TensorCoord at_(int offset_nzpq, int k) const {
    // The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// int nzpq = offset_nzpq_;
// int n = nzpq / (problem_size_.Z * problem_size_.P * problem_size_.Q);
// int residual = nzpq % (problem_size_.Z * problem_size_.P * problem_size_.Q);
//
// int z = residual / (problem_size_.P * problem_size_.Q);
// residual = residual % (problem_size_.P * problem_size_.Q);
//
// int p = residual / problem_size_.Q;
// int q = residual % problem_size_.Q;
int residual, n, z, p, q;
fast_divmod(n, residual, offset_nzpq, params_.ZPQ, params_.zpq_mul, params_.zpq_shr);
fast_divmod(z, residual, residual, params_.PQ, params_.pq_mul, params_.pq_shr);
fast_divmod(p, q, residual, problem_size_.Q, params_.q_mul, params_.q_shr);
return TensorCoord(n, z, p, q, k);
}
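  // Illustrative check of the decomposition above (numbers assumed, not from the
  // source): with Z = 4, P = Q = 28 (so ZPQ = 3136, PQ = 784) and offset_nzpq = 5000,
  // the reference computation gives
  //   n = 5000 / 3136 = 1,  residual = 1864,
  //   z = 1864 / 784  = 2,  residual = 296,
  //   p = 296 / 28    = 10, q = 296 % 28 = 16,
  // and the fast_divmod() calls reproduce exactly these values using the
  // precomputed multipliers/shifts stored in params_.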
/// Returns true if the coord is within the output gradient tensor Dy
CUTLASS_HOST_DEVICE
bool valid_(TensorCoord coord) const {
return coord.n() < problem_size_.N &&
coord.c() < problem_size_.K;
}
public:
/// Returns true if the current coordinate is within the output gradient tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
LongIndex pred_idx = iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous;
return (predicates_ & (1u << pred_idx));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(
pointer_ +
iteration_strided_ * params_.offset_next_strided +
iteration_contiguous_ * params_.offset_next_contiguous
);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dWgradOutputGradientTileAccessIteratorOptimized &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv3d_wgrad_output_gradient_tile_access_iterator_optimized.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv3d_wgrad_output_gradient_tile_access_iterator_optimized.h",
"repo_id": "include",
"token_count": 3680
} | 18 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/thread/mma.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/thread/depthwise_mma.h"
#include "cutlass/gemm/warp/mma_simt_tile_iterator.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/conv/warp/mma_depthwise_simt_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK = 1,
/// Complex transformation on operand A
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex transformation on operand B
ComplexTransform TransformB = ComplexTransform::kNone,
/// Used for partial specialization
typename Enable = bool>
class MmaDepthwiseSimt
: public cutlass::gemm::warp::
MmaSimt<Shape_, ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, Policy_> {
using Base = cutlass::gemm::warp::
MmaSimt<Shape_, ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, Policy_>;
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = ElementC_;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassSimt;
/// Hard-coded for now
using ArchTag = arch::Sm50;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
public:
/// Iterates over the B operand in memory
using IteratorB = cutlass::conv::warp::DepthwiseMmaSimtTileIterator<
MatrixShape<Policy::LaneMmaShape::kK, Shape::kN>,
cutlass::gemm::Operand::kB,
ElementB,
LayoutB,
Policy,
PartitionsK,
Shape::kK
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
  /// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaDepthwiseSimt():Base() {}
};
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Shape of filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width>
typename FilterShape_,
  /// Shape of the output tile computed by thread - concept: conv::TensorNHWCShape<>
typename ThreadOutputShape_,
/// Shape of the output tile computed by threadblock - concept: conv::TensorNHWCShape<>
typename ThreadBlockOutputShape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Iterator algo type
conv::IteratorAlgorithm IteratorAlgorithm_ = IteratorAlgorithm::kAnalytic,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape_ = cutlass::MatrixShape<-1, -1>,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape_ = cutlass::MatrixShape<-1, -1>,
/// Activation Shape loaded by threadblock
typename ActivationShape_ = cutlass::conv::TensorNHWCShape<-1,-1,-1,-1>,
/// Number of partitions along K dimension
int PartitionsK = 1,
/// Complex transformation on operand A
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex transformation on operand B
ComplexTransform TransformB = ComplexTransform::kNone,
/// Used for partial specialization
typename Enable = bool>
class MmaDepthwiseDirectConvSimt {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Shape of filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width>
using FilterShape = FilterShape_;
  /// Shape of the output tile computed by thread - concept: conv::TensorNHWCShape<>
using ThreadOutputShape = ThreadOutputShape_;
/// Shape of the output tile computed by threadblock - concept: conv::TensorNHWCShape<>
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = ElementC_;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Iterator algo type
static conv::IteratorAlgorithm const IteratorAlgorithm = IteratorAlgorithm_;
/// Stride ( MatrixShape<Height, Width> )
using StrideShape = StrideShape_;
/// Dilation ( MatrixShape<Height, Width> )
using DilationShape = DilationShape_;
/// Activation Shape loaded by threadblock
using ActivationShape = ActivationShape_;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassSimt;
/// Hard-coded for now
using ArchTag = arch::Sm50;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
static constexpr bool use_dp4a = (platform::is_same< layout::ColumnMajorInterleaved<4>, LayoutA>::value ||
platform::is_same< layout::RowMajorInterleaved<4>, LayoutA >::value) &&
platform::is_same< ElementA, int8_t >::value &&
platform::is_same< ElementB, int8_t >::value;
using dp4a_type = typename platform::conditional< use_dp4a , int8_t, bool >::type;
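  // Note (exposition): dp4a is enabled only when both operands are int8 in a
  // 4-element interleaved layout, letting each SIMT lane consume four 8-bit values
  // per instruction; otherwise the thread-level inner product below falls back to
  // scalar multiply-add (InstructionShape K of 1).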
/// Thread-level matrix multiply accumulate operator
using ThreadMma = cutlass::conv::thread::DepthwiseDirectConvElementwiseInnerProduct<
cutlass::gemm::GemmShape<
      Shape::kM / Policy::WarpShape::kRow, // number of output pixels processed per thread
      Shape::kN / Policy::WarpShape::kColumn, // number of channels processed per thread
1>,
ElementA,
ElementB,
ElementC,
arch::OpMultiplyAdd,
dp4a_type
>;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename ThreadMma::ArchMmaOperator;
/// Indicates math operator
using MathOperator = typename ArchMmaOperator::Operator;
/// Shape of the underlying instruction
using InstructionShape = cutlass::gemm::GemmShape<1,1,use_dp4a ? 4 : 1>;
public:
/// Iterates over the A operand in memory
using IteratorA = cutlass::conv::warp::DepthwiseDirect2dConvSimtTileIterator<
MatrixShape<Shape::kM, Shape::kN>, // <output tile=(P*Q), output channels> per warp
FilterShape,
ThreadOutputShape,
ThreadBlockOutputShape,
cutlass::gemm::Operand::kA,
ElementA,
Policy,
IteratorAlgorithm,
StrideShape,
DilationShape,
ActivationShape,
PartitionsK,
Shape::kK
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA = FragmentA;
/// Iterates over the B operand in memory
using IteratorB = cutlass::gemm::warp::MmaSimtTileIterator<
MatrixShape<1, Shape::kN>,
cutlass::gemm::Operand::kB,
ElementB,
LayoutB,
Policy,
PartitionsK,
Shape::kK
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
  /// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
/// Iterates over the C operand in memory
using IteratorC = cutlass::gemm::warp::MmaSimtTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
cutlass::gemm::Operand::kC,
ElementC,
LayoutC,
Policy
>;
/// Storage for C tile
using FragmentC = typename ThreadMma::FragmentC;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaDepthwiseDirectConvSimt() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &d,
FragmentA a,
FragmentB b,
FragmentC const &c, int group_idx = 0) const {
ThreadMma mma;
mma(d, a, b, c);
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
dst_A = A;
dst_B = B;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace conv
} // namespace cutlass
| include/cutlass/conv/warp/mma_depthwise_simt.h/0 | {
"file_path": "include/cutlass/conv/warp/mma_depthwise_simt.h",
"repo_id": "include",
"token_count": 4004
} | 19 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Visitor tree operation base implementation to enable composable fusions
for the sm90 TMA warp-specialized (ws) epilogue
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/workspace.h"
#include "cute/tensor.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue::fusion {
using namespace cute;
using cute::tuple;
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partitioning Helpers
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class CtaTileMN,
class EpilogueTile,
class TiledCopy
>
CUTLASS_HOST_DEVICE
constexpr auto
sm90_partition_for_epilogue(
CtaTileMN cT, // (CTA_M,CTA_N,...)
EpilogueTile epi_tile, // (EPI_TILE_M,EPI_TILE_N)
TiledCopy tiled_copy,
int thread_idx) {
ThrCopy thread_copy = tiled_copy.get_thread_slice(thread_idx);
Tensor cT_epi = flat_divide(cT, epi_tile); // (EPI_TILE_M,EPI_TILE_N,EPI_M,EPI_N,...)
if constexpr (ReferenceSrc) {
return thread_copy.partition_S(cT_epi); // (CPY,CPY_M,CPY_N,EPI_M,EPI_N,...)
}
else {
return thread_copy.partition_D(cT_epi); // (CPY,CPY_M,CPY_N,EPI_M,EPI_N,...)
}
}
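// Illustrative shape walk-through for the overload above (tile sizes assumed, not
// taken from the source): with a (_128,_128) CTA tile and a (_64,_32) epilogue tile,
// flat_divide produces cT_epi of shape (_64,_32,_2,_4) = (EPI_TILE_M,EPI_TILE_N,EPI_M,EPI_N),
// and partition_S/D then prepends the copy-atom value modes, yielding the per-thread
// (CPY,CPY_M,CPY_N,EPI_M,EPI_N) tensor that is returned.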
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class Engine, class LayoutMNL,
class TileShapeMNK,
class TileCoordMNKL,
class EpilogueTile,
class TiledCopy
>
CUTLASS_HOST_DEVICE
constexpr auto
sm90_partition_for_epilogue(
Tensor<Engine, LayoutMNL> mT, // (M,N,L)
TileShapeMNK tile_shape_mnk, // (CTA_M,CTA_N,CTA_K)
TileCoordMNKL tile_coord_mnkl, // (m,n,k,l)
EpilogueTile epi_tile, // (EPI_TILE_M,EPI_TILE_N)
TiledCopy tiled_copy,
int thread_idx) {
auto [m, n, k, l] = tile_coord_mnkl;
  auto coord_shape = make_coord(m, n, l);
Tensor cT = local_tile(mT, take<0,2>(tile_shape_mnk), coord_shape); // (CTA_M,CTA_N)
Tensor tCcT =
sm90_partition_for_epilogue<ReferenceSrc>(cT, epi_tile, tiled_copy, thread_idx); // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
return tCcT;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Visitor Implementation
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template<
class ProblemShapeMNKL,
class TileShapeMNK,
class TileCoordMNKL,
class TiledMma,
class EpilogueTile
>
struct ProducerLoadArgs {
ProblemShapeMNKL problem_shape_mnkl;
TileShapeMNK tile_shape_mnk;
TileCoordMNKL tile_coord_mnkl;
TiledMma tiled_mma;
EpilogueTile epi_tile;
int thread_idx;
CUTLASS_DEVICE
ProducerLoadArgs(
ProblemShapeMNKL problem_shape_mnkl,
TileShapeMNK tile_shape_mnk,
TileCoordMNKL tile_coord_mnkl,
TiledMma tiled_mma,
EpilogueTile epi_tile,
int thread_idx)
: problem_shape_mnkl(problem_shape_mnkl),
tile_shape_mnk(tile_shape_mnk),
tile_coord_mnkl(tile_coord_mnkl),
tiled_mma(tiled_mma),
epi_tile(epi_tile),
thread_idx(thread_idx) {}
};
template<
class ProblemShapeMNKL,
class TileShapeMNK,
class TileCoordMNKL,
class TiledMma,
class EpilogueTile,
class TiledCopy,
class CoordTensor,
class Residue,
class ThrCoordTensor,
class ThrResidue,
class ThrSrcTensor
>
struct ConsumerStoreArgs {
ProblemShapeMNKL problem_shape_mnkl;
TileShapeMNK tile_shape_mnk;
TileCoordMNKL tile_coord_mnkl;
TiledMma tiled_mma;
EpilogueTile epi_tile;
TiledCopy tiled_copy;
CoordTensor cD;
Residue residue_cD;
ThrCoordTensor tCcD;
ThrResidue residue_tCcD;
ThrSrcTensor const& tCrC;
int thread_idx;
CUTLASS_DEVICE
ConsumerStoreArgs(
ProblemShapeMNKL problem_shape_mnkl,
TileShapeMNK tile_shape_mnk,
TileCoordMNKL tile_coord_mnkl,
TiledMma tiled_mma,
EpilogueTile epi_tile,
TiledCopy tiled_copy,
CoordTensor cD,
Residue residue_cD,
ThrCoordTensor tCcD,
ThrResidue residue_tCcD,
ThrSrcTensor const& tCrC,
int thread_idx)
: problem_shape_mnkl(problem_shape_mnkl),
tile_shape_mnk(tile_shape_mnk),
tile_coord_mnkl(tile_coord_mnkl),
tiled_mma(tiled_mma),
epi_tile(epi_tile),
tiled_copy(tiled_copy),
cD(cD),
residue_cD(residue_cD),
tCcD(tCcD),
residue_tCcD(residue_tCcD),
tCrC(tCrC),
thread_idx(thread_idx) {}
};
template <class... Ops>
struct Sm90VisitorImplBase {
// Shared memory allocation
using SharedStorage = tuple<typename Ops::SharedStorage...>;
// Host side fusion arguments
using Arguments = tuple<typename Ops::Arguments...>;
// Device side fusion params (Kernel-entry API)
using Params = tuple<typename Ops::Params...>;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
uint8_t* op_workspace = reinterpret_cast<uint8_t*>(workspace);
return transform_apply(tuple<Ops...>{}, args,
[&] (auto&& op, auto const& op_args) {
using Op = cute::remove_cvref_t<decltype(op)>;
auto ret = Op::to_underlying_arguments(problem_shape, op_args, op_workspace);
if (op_workspace != nullptr) {
size_t op_workspace_size = Op::get_workspace_size(problem_shape, op_args);
op_workspace += round_nearest(op_workspace_size, MinWorkspaceAlignment);
}
return ret;
},
[] (auto&&... op_params) { return cute::make_tuple(op_params...); }
);
}
template <class ProblemShape>
static bool
can_implement(ProblemShape const& problem_shape, Arguments const& args) {
return transform_apply(tuple<Ops...>{}, args,
[&] (auto&& op, auto const& op_args) {
using Op = cute::remove_cvref_t<decltype(op)>;
return Op::can_implement(problem_shape, op_args);
},
[&] (auto&&... implementable) {
return (true && ... && implementable);
}
);
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
return transform_apply(tuple<Ops...>{}, args,
[&] (auto&& op, auto const& op_args) {
using Op = cute::remove_cvref_t<decltype(op)>;
size_t op_workspace_size = Op::get_workspace_size(problem_shape, op_args);
return round_nearest(op_workspace_size, MinWorkspaceAlignment);
},
[&] (auto&&... op_workspace_size) {
return (0 + ... + op_workspace_size);
}
);
}
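  // Worked example (illustrative, assuming MinWorkspaceAlignment is 16 bytes): if three ops
  // report workspace sizes {24, 0, 40}, get_workspace_size returns 32 + 0 + 48 = 80 bytes, and
  // to_underlying_arguments/initialize_workspace hand the ops sub-buffers at byte offsets
  // {0, 32, 32} of the single caller-provided workspace allocation.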
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
Status status = Status::kSuccess;
uint8_t* op_workspace = reinterpret_cast<uint8_t*>(workspace);
return transform_apply(tuple<Ops...>{}, args,
// Initialize each operation's workspace, stopping at the first error
[&] (auto&& op, auto const& op_args) {
if (status != Status::kSuccess) {
return status;
}
using Op = cute::remove_cvref_t<decltype(op)>;
status = Op::initialize_workspace(problem_shape, op_args, op_workspace, stream, cuda_adapter);
if (op_workspace != nullptr) {
size_t op_workspace_size = Op::get_workspace_size(problem_shape, op_args);
op_workspace += round_nearest(op_workspace_size, MinWorkspaceAlignment);
}
return status;
},
// Return the final status
[&] (auto const&...ops) { return status; }
);
}
CUTLASS_HOST_DEVICE
Sm90VisitorImplBase() {}
CUTLASS_HOST_DEVICE
Sm90VisitorImplBase(Params const& params, SharedStorage const& shared_storage)
: ops(transform_apply(tuple<Ops...>{}, params, shared_storage,
[] (auto&& op, auto const& op_params, auto&& op_storage) {
using Op = cute::remove_cvref_t<decltype(op)>;
return Op(op_params, op_storage);
},
[] (auto&&... ops) { return cute::make_tuple(ops...); }
)) {}
// Ops can store kernel persistent variables (e.g. descriptors, scalars, wave counters)
tuple<Ops...> ops;
};
template <class... Ops>
struct Sm90VisitorImpl : Sm90VisitorImplBase<Ops...> {
using Impl = Sm90VisitorImplBase<Ops...>;
using Params = typename Impl::Params;
using SharedStorage = typename Impl::SharedStorage;
CUTLASS_HOST_DEVICE
Sm90VisitorImpl() {}
CUTLASS_HOST_DEVICE
Sm90VisitorImpl(Params const& params, SharedStorage const& shared_storage)
: Impl(params, shared_storage) {}
using Impl::ops;
//
// Queries for kernel runtime
//
// Is a specialized warp for producer TMA loads needed
// e.g. Aux tensor loads, broadcasts using TMA bulk copy
// This condition cannot change between work tiles because it is used
// to determine whether the load warp should exit early or not
// e.g. for batched beta this must always be true regardless of current batch idx
CUTLASS_DEVICE bool
is_producer_load_needed() const {
return cute::apply(ops,
[] (auto const&... op) {
return (false || ... || op.is_producer_load_needed());
}
);
}
// Is a producer TMA load specifically for C needed
// If this is true then is_producer_load_needed must also be true
// This condition can change between work tiles because it is only used
// to determine whether the TMA and smem loads for C of a given tile should happen
// e.g. for batched beta this can be false depending on current batch idx
CUTLASS_DEVICE bool
is_C_load_needed() const {
return cute::apply(ops,
[] (auto const&... op) {
return (false || ... || op.is_C_load_needed());
}
);
}
//
// Producer load callbacks, called by the epilogue load warp.
// Operations usually only define this if a TMA load is needed. Most operations will reuse this empty implementation
// Load callbacks are responsible for issuing corresponding mbarrier expect-tx ops for any TMA loads issued, but
// are not responsible for issuing the producer_commit barrier arrival, which is issued by the collective instead
// If this is non-empty, is_producer_load_needed must be true.
//
template <class CallbacksTuple>
struct ProducerLoadCallbacks {
// Callbacks can store non-persistent variables (e.g. tensors) or copies of persistent variables
CallbacksTuple callbacks_tuple;
// Before entry of the subtile load loop. Bulk copies usually performed here.
// Upon entry the producer_acquire of the first subtile lock has completed.
// full_mbarrier_ptr is the corresponding barrier for the subsequent producer_commit arrival
CUTLASS_DEVICE void
begin(uint64_t* full_mbarrier_ptr, int load_iteration, bool issue_tma_load) {
for_each(callbacks_tuple,
[&] (auto& callbacks) {
callbacks.begin(full_mbarrier_ptr, load_iteration, issue_tma_load);
}
);
}
// Entry of the subtile load loop. Aux loads usually performed here
// Upon entry the producer acquire of the current subtile lock has completed.
// Upon exit all TMA loads for this subtile must have been issued, with corresponding expect-tx operations
CUTLASS_DEVICE void
step(uint64_t* full_mbarrier_ptr, int epi_m, int epi_n, int load_iteration, bool issue_tma_load) {
for_each(callbacks_tuple,
[&] (auto& callbacks) {
callbacks.step(full_mbarrier_ptr, epi_m, epi_n, load_iteration, issue_tma_load);
}
);
}
// Exit of the subtile load loop.
CUTLASS_DEVICE void
end() {
for_each(callbacks_tuple,
[] (auto& callbacks) {
callbacks.end();
}
);
}
};
// Producer load callbacks factory
// All operations must redefine this, but most can just dispatch to the base impl
template <class... Args>
CUTLASS_DEVICE auto
get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) {
return transform_apply(ops,
[&] (auto& op) {
return op.get_producer_load_callbacks(args);
},
[] (auto&&... callbacks) {
auto callbacks_tuple = cute::make_tuple(callbacks...);
return ProducerLoadCallbacks<decltype(callbacks_tuple)>{callbacks_tuple};
}
);
}
//
// Consumer store callbacks, called by the epilogue store warps.
// All operations must redefine this, with optional inheritance from this empty implementation.
//
template <class CallbacksTuple>
struct ConsumerStoreCallbacks {
// Callbacks can store non-persistent variables (e.g. tensors) or copies of persistent variables
CallbacksTuple callbacks_tuple;
// Before entry of subtile store loop. Gmem broadcasts usually performed here.
CUTLASS_DEVICE void
begin() {
for_each(callbacks_tuple,
[] (auto& callbacks) {
callbacks.begin();
}
);
}
// Start of subtile store iteration
CUTLASS_DEVICE void
begin_loop(int epi_m, int epi_n) {
for_each(callbacks_tuple,
[&] (auto& callbacks) {
callbacks.begin_loop(epi_m, epi_n);
}
);
}
// Before visit callback. Smem broadcasts usually performed here.
// Upon entry, all producer loads for this subtile are completed and visible.
CUTLASS_DEVICE void
previsit(int epi_m, int epi_n, int load_iteration, bool is_producer_load_needed) {
for_each(callbacks_tuple,
[&] (auto& callbacks) {
callbacks.previsit(epi_m, epi_n, load_iteration, is_producer_load_needed);
}
);
}
// Perform the fused elementwise computation
template <typename ElementAccumulator, typename... ElementInputs, int FragmentSize>
CUTLASS_DEVICE auto // returns an Array
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n,
Array<ElementInputs, FragmentSize> const&... frg_inputs) // depends on the N-naryness of the op
= delete; // Must be implemented for each operation
// After visit call. Smem reductions usually performed here
// reduction_buffer is an arbitrary smem tensor that can be used for workspace
// It is each node's responsibility to assert that this buffer is sufficiently sized
// and to ensure that this buffer is no longer needed upon callback exit
// i.e. results are synchronized and no longer in the reduction buffer
//
// visit_results is an rmem tensor that contains the results of visit() for the entire
// current epilogue subtile
template <class STensor, class SyncFn, class VTensor>
CUTLASS_DEVICE void
reduce(STensor&& reduction_buffer, SyncFn const& sync_fn, int epi_m, int epi_n, bool is_last_iteration, VTensor visit_results) {
for_each(callbacks_tuple,
[&] (auto& callbacks) {
callbacks.reduce(reduction_buffer, sync_fn, epi_m, epi_n, is_last_iteration, visit_results);
}
);
}
// After reduce call, before smem async fence. Smem stores usually performed here.
// Upon exit, all smem stores for TMA must have been issued
CUTLASS_DEVICE void
postreduce(int epi_m, int epi_n, int store_iteration, bool issue_smem_store) {
for_each(callbacks_tuple,
[&] (auto& callbacks) {
callbacks.postreduce(epi_m, epi_n, store_iteration, issue_smem_store);
}
);
}
// After smem async fence, before TMA store commit. Aux stores usually performed here
// Upon exit, all TMA stores for this subtile must have been issued
// Because of the TMA store delay optimization, this entry point must ONLY be used for TMA stores
// other gmem stores can be placed in the reduce or postreduce entry points
CUTLASS_DEVICE void
tma_store(int epi_m, int epi_n, int store_iteration, bool issue_tma_store) {
for_each(callbacks_tuple,
[&] (auto& callbacks) {
callbacks.tma_store(epi_m, epi_n, store_iteration, issue_tma_store);
}
);
}
// End of subtile store iteration
CUTLASS_DEVICE void
end_loop(int epi_m, int epi_n) {
for_each(callbacks_tuple,
[&] (auto& callbacks) {
callbacks.end_loop(epi_m, epi_n);
}
);
}
// Exit of subtile store loop. Gmem reductions usually performed here.
CUTLASS_DEVICE void
end() {
for_each(callbacks_tuple,
[&] (auto& callbacks) {
callbacks.end();
}
);
}
};
// Consumer store callbacks factory
// All operations must redefine this
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
return transform_apply(ops,
[&] (auto& op) {
return op.template get_consumer_store_callbacks<ReferenceSrc>(args);
},
[] (auto&&... callbacks) {
auto callbacks_tuple = cute::make_tuple(callbacks...);
return ConsumerStoreCallbacks<decltype(callbacks_tuple)>{callbacks_tuple};
}
);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Convenience aliases
using EmptyProducerLoadCallbacks = Sm90VisitorImpl<>::ProducerLoadCallbacks<cute::tuple<>>;
using EmptyConsumerStoreCallbacks = Sm90VisitorImpl<>::ConsumerStoreCallbacks<cute::tuple<>>;
/////////////////////////////////////////////////////////////////////////////////////////////////
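// Illustrative sketch (not part of the library): the device-side surface a minimal leaf node
// exposes to the visitors above. It scales the accumulator fragment by two and needs no
// producer loads. The host-side Arguments/Params/workspace plumbing required by
// Sm90VisitorImplBase is omitted for brevity, so treat this as a conceptual example only.
struct HypotheticalScaleByTwo {
  CUTLASS_DEVICE bool is_producer_load_needed() const { return false; }
  CUTLASS_DEVICE bool is_C_load_needed() const { return false; }

  // No TMA loads to issue: reuse the empty producer callbacks defined above
  template <class... Args>
  CUTLASS_DEVICE auto
  get_producer_load_callbacks(ProducerLoadArgs<Args...> const&) {
    return EmptyProducerLoadCallbacks{};
  }

  struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks {
    // Nullary visit: consumes only the accumulator fragment and returns 2 * acc
    template <typename ElementAccumulator, int FragmentSize>
    CUTLASS_DEVICE auto
    visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int, int, int) {
      Array<ElementAccumulator, FragmentSize> frg_out;
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < FragmentSize; ++i) {
        frg_out[i] = frg_acc[i] + frg_acc[i];
      }
      return frg_out;
    }
  };

  template <bool ReferenceSrc, class... Args>
  CUTLASS_DEVICE auto
  get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const&) {
    return ConsumerStoreCallbacks{};
  }
};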
} // namespace detail
using namespace detail;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Tree visitor
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <class NodeOp, class... ChildOps>
struct Sm90TreeVisitor : Sm90VisitorImpl<ChildOps..., NodeOp> {
using Impl = Sm90VisitorImpl<ChildOps..., NodeOp>;
using Params = typename Impl::Params;
using SharedStorage = typename Impl::SharedStorage;
CUTLASS_HOST_DEVICE
Sm90TreeVisitor() {}
CUTLASS_HOST_DEVICE
Sm90TreeVisitor(
Params const& params,
SharedStorage const& shared_storage)
: Impl(params, shared_storage) {}
template<class CallbacksImpl>
struct ConsumerStoreCallbacks : CallbacksImpl {
CUTLASS_DEVICE
ConsumerStoreCallbacks(CallbacksImpl&& impl)
: CallbacksImpl(cute::forward<CallbacksImpl>(impl)) {}
using CallbacksImpl::callbacks_tuple;
template <typename ElementAccumulator, int FragmentSize>
CUTLASS_DEVICE auto
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n) {
constexpr int Rm1 = sizeof...(ChildOps);
return cute::detail::tapply(callbacks_tuple,
[&] (auto& child_callbacks) {
return child_callbacks.visit(frg_acc, epi_v, epi_m, epi_n); // child ops must be nullary (e.g. loads, trees)
},
[&] (auto&&... frg_inputs) {
return get<Rm1>(callbacks_tuple).visit(frg_acc, epi_v, epi_m, epi_n, frg_inputs...);
},
make_seq<Rm1>{} // restrict the transform to R-1 child ops, apply is for node op
);
}
};
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
auto callbacks_tuple = Sm90VisitorImpl<ChildOps..., NodeOp>::
template get_consumer_store_callbacks<ReferenceSrc>(args);
return ConsumerStoreCallbacks<decltype(callbacks_tuple)>(std::move(callbacks_tuple));
}
};
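// Example (illustrative): the classic linear combination D = alpha * acc + beta * C can be
// phrased as a tree whose node op is a multiply-add and whose children are the scalar
// broadcast and acc/C fetch nodes. Node type names (Sm90Compute, Sm90ScalarBroadcast,
// Sm90SrcFetch, Sm90AccFetch) are assumed to come from the companion visitor headers and may
// differ between releases:
//
//   Sm90TreeVisitor<
//     Sm90Compute<homogeneous_multiply_add, ElementD, ElementCompute, RoundStyle>, // beta * C + (alpha * acc)
//     Sm90ScalarBroadcast<ElementScalar>,                                          // beta
//     Sm90SrcFetch<ElementC>,                                                      // C
//     Sm90TreeVisitor<
//       Sm90Compute<multiplies, ElementCompute, ElementCompute, RoundStyle>,       // alpha * acc
//       Sm90ScalarBroadcast<ElementScalar>,                                        // alpha
//       Sm90AccFetch                                                               // acc
//     >
//   >;
//
// The node op is the first template argument, followed by its children; internally they are
// stored as Sm90VisitorImpl<ChildOps..., NodeOp>, so the node op is visited last.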
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// DAG visitors
//
/////////////////////////////////////////////////////////////////////////////////////////////////
// Most DAG fusions can be represented as a set of output trees with a common input tree
// The common input is first evaluated, then the result is passed as the acc fragment to the output trees
template <class InputTree, class OutputTree, class... AuxOutTrees>
struct Sm90SplitTreeVisitor : Sm90VisitorImpl<InputTree, AuxOutTrees..., OutputTree> {
using Sm90VisitorImpl<InputTree, AuxOutTrees..., OutputTree>::Sm90VisitorImpl;
template<class CallbacksImpl>
struct ConsumerStoreCallbacks : CallbacksImpl {
CUTLASS_DEVICE
ConsumerStoreCallbacks(CallbacksImpl&& impl)
: CallbacksImpl(cute::forward<CallbacksImpl>(impl)) {}
using CallbacksImpl::callbacks_tuple;
template <typename ElementAccumulator, int FragmentSize>
CUTLASS_DEVICE auto
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n) {
Array frg_input = get<0>(callbacks_tuple).visit(frg_acc, epi_v, epi_m, epi_n);
constexpr int Rm2 = sizeof...(AuxOutTrees);
cute::for_each(make_seq<Rm2>{}, // restrict the sequence to aux out trees
[&] (auto I) {
get<I+1>(callbacks_tuple).visit(frg_input, epi_v, epi_m, epi_n);
}
);
return get<Rm2+1>(callbacks_tuple).visit(frg_input, epi_v, epi_m, epi_n);
}
};
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
auto callbacks_tuple = Sm90VisitorImpl<InputTree, AuxOutTrees..., OutputTree>::
template get_consumer_store_callbacks<ReferenceSrc>(args);
return ConsumerStoreCallbacks<decltype(callbacks_tuple)>(std::move(callbacks_tuple));
}
};
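// Example (illustrative): a split-tree fusion might compute T = alpha * acc + beta * C once in
// the input tree, store T to an auxiliary tensor through an aux output tree, and return
// D = relu(T) from the final output tree. As visit() above shows, the input tree is evaluated
// first, its fragment is handed to each aux tree in order, and the last tree's result becomes
// the visitor's output.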
/////////////////////////////////////////////////////////////////////////////////////////////////
template<
// deducing the output type for all the nodes is tricky so we just convert them all to a common type
// if multiple compute types are needed then split into multiple subgraphs grouped by type
class ElementCompute,
class EdgeTuple, // tuple of int_sequence, one per node, each listing that node's child indices (nodes are indexed by topological order)
class... Ops // in topological order, last op is the output. EdgeTuple must match this order
>
struct Sm90TopologicalVisitor : Sm90VisitorImpl<Ops...> {
static_assert(is_static_v<EdgeTuple>);
static_assert(cute::rank(EdgeTuple{}) == sizeof...(Ops));
static_assert(sizeof...(Ops) > 1);
using Sm90VisitorImpl<Ops...>::Sm90VisitorImpl;
template<class CallbacksImpl>
struct ConsumerStoreCallbacks : CallbacksImpl {
CUTLASS_DEVICE
ConsumerStoreCallbacks(CallbacksImpl&& impl)
: CallbacksImpl(cute::forward<CallbacksImpl>(impl)) {}
using CallbacksImpl::callbacks_tuple;
template <typename ElementAccumulator, int FragmentSize>
CUTLASS_DEVICE auto
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n) {
constexpr int Rm1 = sizeof...(Ops) - 1;
auto frg_compute_tuple = cute::repeat<Rm1>(Array<ElementCompute, FragmentSize>{});
return cute::detail::tapply(EdgeTuple{}, callbacks_tuple, frg_compute_tuple,
// Visit the first R-1 ops in topological order
[&] (auto&& edge_seq, auto& callbacks, auto& frg_compute) {
frg_compute = cute::detail::apply(frg_compute_tuple,
// Compute the current op with children inputs
[&] (auto const&... frg_inputs) {
auto frg_output = callbacks.visit(frg_acc, epi_v, epi_m, epi_n, frg_inputs...);
using ElementOutput = typename decltype(frg_output)::Element;
using ConvertOutput = NumericArrayConverter<ElementCompute, ElementOutput, FragmentSize>;
ConvertOutput convert_output{};
return convert_output(frg_output);
},
// Get inputs in the sequence given by the children indices of the current op
edge_seq
);
return frg_compute; // unused
},
// Visit the last op
[&] (auto const&...ops) {
return cute::detail::apply(frg_compute_tuple,
// Compute the last op with children inputs
[&] (auto const&... frg_inputs) {
return get<Rm1>(callbacks_tuple).visit(frg_acc, epi_v, epi_m, epi_n, frg_inputs...);
},
// Get inputs in the sequence given by the children indices of the last op
get<Rm1>(EdgeTuple{})
);
},
// Transform to visit R-1 ops, apply to visit last op
make_seq<Rm1>{}
);
}
};
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
auto callbacks_tuple = Sm90VisitorImpl<Ops...>::
template get_consumer_store_callbacks<ReferenceSrc>(args);
return ConsumerStoreCallbacks<decltype(callbacks_tuple)>(std::move(callbacks_tuple));
}
};
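// Example (illustrative): a four-node DAG computing D = acc * (acc + C), with nodes in
// topological order {0: acc fetch, 1: C fetch, 2: plus(acc, C), 3: multiplies(acc, node 2)},
// would describe its edges by listing each node's children by index:
//
//   using EdgeTuple = cute::tuple<cute::seq<>, cute::seq<>, cute::seq<0,1>, cute::seq<0,2>>;
//
// The last node (3) is the output, and EdgeTuple must list the nodes in the same order as Ops.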
/////////////////////////////////////////////////////////////////////////////////////////////////
// Base specializations so we can have standard layout params and simple aggregate initializers
namespace detail {
template <class Op0>
struct Sm90VisitorImplBase<Op0> {
// Retain tuple for SharedStorage because empty structs have 1B alignment
// tuples use multiple inheritance, which avoids this problem
using SharedStorage = tuple<
typename Op0::SharedStorage
>;
struct Arguments {
typename Op0::Arguments op_0;
};
struct Params {
typename Op0::Params op_0;
};
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
return Params{
Op0::to_underlying_arguments(problem_shape, args.op_0, workspace)
};
}
template <class ProblemShape>
static bool
can_implement(ProblemShape const& problem_shape, Arguments const& args) {
return Op0::can_implement(problem_shape, args.op_0);
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
size_t workspace_size = 0;
workspace_size += Op0::get_workspace_size(problem_shape, args.op_0);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
return workspace_size;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
Status status = Status::kSuccess;
uint8_t* workspace_ptr = reinterpret_cast<uint8_t*>(workspace);
size_t workspace_offset = 0;
status = Op0::initialize_workspace(problem_shape, args.op_0, workspace_ptr + workspace_offset, stream, cuda_adapter);
workspace_offset += Op0::get_workspace_size(problem_shape, args.op_0);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
if (status != Status::kSuccess) {
return status;
}
return status;
}
CUTLASS_HOST_DEVICE
Sm90VisitorImplBase() {}
CUTLASS_HOST_DEVICE
Sm90VisitorImplBase(Params const& params, SharedStorage const& shared_storage)
: ops({
Op0(params.op_0, get<0>(shared_storage))
}) {}
tuple<Op0> ops;
};
template <class Op0, class Op1>
struct Sm90VisitorImplBase<Op0, Op1> {
using SharedStorage = tuple<
typename Op0::SharedStorage,
typename Op1::SharedStorage
>;
struct Arguments {
typename Op0::Arguments op_0;
typename Op1::Arguments op_1;
};
struct Params {
typename Op0::Params op_0;
typename Op1::Params op_1;
};
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
size_t op_0_workspace_size = Op0::get_workspace_size(problem_shape, args.op_0);
uint8_t* op_0_workspace = reinterpret_cast<uint8_t*>(workspace);
uint8_t* op_1_workspace = op_0_workspace + op_0_workspace_size;
return Params{
Op0::to_underlying_arguments(problem_shape, args.op_0, op_0_workspace),
Op1::to_underlying_arguments(problem_shape, args.op_1, op_1_workspace)
};
}
template <class ProblemShape>
static bool
can_implement(ProblemShape const& problem_shape, Arguments const& args) {
return Op0::can_implement(problem_shape, args.op_0) &&
Op1::can_implement(problem_shape, args.op_1);
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
size_t workspace_size = 0;
workspace_size += Op0::get_workspace_size(problem_shape, args.op_0);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
workspace_size += Op1::get_workspace_size(problem_shape, args.op_1);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
return workspace_size;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
Status status = Status::kSuccess;
uint8_t* workspace_ptr = reinterpret_cast<uint8_t*>(workspace);
size_t workspace_offset = 0;
status = Op0::initialize_workspace(problem_shape, args.op_0, workspace_ptr + workspace_offset, stream, cuda_adapter);
workspace_offset += Op0::get_workspace_size(problem_shape, args.op_0);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
if (status != Status::kSuccess) {
return status;
}
status = Op1::initialize_workspace(problem_shape, args.op_1, workspace_ptr + workspace_offset, stream, cuda_adapter);
workspace_offset += Op1::get_workspace_size(problem_shape, args.op_1);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
if (status != Status::kSuccess) {
return status;
}
return status;
}
CUTLASS_HOST_DEVICE
Sm90VisitorImplBase() {}
CUTLASS_HOST_DEVICE
Sm90VisitorImplBase(Params const& params, SharedStorage const& shared_storage)
: ops({
Op0(params.op_0, get<0>(shared_storage)),
Op1(params.op_1, get<1>(shared_storage))
}) {}
tuple<Op0, Op1> ops;
};
template <class Op0, class Op1, class Op2>
struct Sm90VisitorImplBase<Op0, Op1, Op2> {
using SharedStorage = tuple<
typename Op0::SharedStorage,
typename Op1::SharedStorage,
typename Op2::SharedStorage
>;
struct Arguments {
typename Op0::Arguments op_0;
typename Op1::Arguments op_1;
typename Op2::Arguments op_2;
};
struct Params {
typename Op0::Params op_0;
typename Op1::Params op_1;
typename Op2::Params op_2;
};
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
size_t op_0_workspace_size = Op0::get_workspace_size(problem_shape, args.op_0);
size_t op_1_workspace_size = Op1::get_workspace_size(problem_shape, args.op_1);
uint8_t* op_0_workspace = reinterpret_cast<uint8_t*>(workspace);
uint8_t* op_1_workspace = op_0_workspace + op_0_workspace_size;
uint8_t* op_2_workspace = op_1_workspace + op_1_workspace_size;
return Params{
Op0::to_underlying_arguments(problem_shape, args.op_0, op_0_workspace),
Op1::to_underlying_arguments(problem_shape, args.op_1, op_1_workspace),
Op2::to_underlying_arguments(problem_shape, args.op_2, op_2_workspace)
};
}
template <class ProblemShape>
static bool
can_implement(ProblemShape const& problem_shape, Arguments const& args) {
return Op0::can_implement(problem_shape, args.op_0) &&
Op1::can_implement(problem_shape, args.op_1) &&
Op2::can_implement(problem_shape, args.op_2);
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
size_t workspace_size = 0;
workspace_size += Op0::get_workspace_size(problem_shape, args.op_0);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
workspace_size += Op1::get_workspace_size(problem_shape, args.op_1);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
workspace_size += Op2::get_workspace_size(problem_shape, args.op_2);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
return workspace_size;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
Status status = Status::kSuccess;
uint8_t* workspace_ptr = reinterpret_cast<uint8_t*>(workspace);
size_t workspace_offset = 0;
status = Op0::initialize_workspace(problem_shape, args.op_0, workspace_ptr + workspace_offset, stream, cuda_adapter);
workspace_offset += Op0::get_workspace_size(problem_shape, args.op_0);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
if (status != Status::kSuccess) {
return status;
}
status = Op1::initialize_workspace(problem_shape, args.op_1, workspace_ptr + workspace_offset, stream, cuda_adapter);
workspace_offset += Op1::get_workspace_size(problem_shape, args.op_1);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
if (status != Status::kSuccess) {
return status;
}
status = Op2::initialize_workspace(problem_shape, args.op_2, workspace_ptr + workspace_offset, stream, cuda_adapter);
workspace_offset += Op2::get_workspace_size(problem_shape, args.op_2);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
if (status != Status::kSuccess) {
return status;
}
return status;
}
CUTLASS_HOST_DEVICE
Sm90VisitorImplBase() {}
CUTLASS_HOST_DEVICE
Sm90VisitorImplBase(Params const& params, SharedStorage const& shared_storage)
: ops({
Op0(params.op_0, get<0>(shared_storage)),
Op1(params.op_1, get<1>(shared_storage)),
Op2(params.op_2, get<2>(shared_storage))
}) {}
tuple<Op0, Op1, Op2> ops;
};
template <class Op0, class Op1, class Op2, class Op3>
struct Sm90VisitorImplBase<Op0, Op1, Op2, Op3> {
using SharedStorage = tuple<
typename Op0::SharedStorage,
typename Op1::SharedStorage,
typename Op2::SharedStorage,
typename Op3::SharedStorage
>;
struct Arguments {
typename Op0::Arguments op_0;
typename Op1::Arguments op_1;
typename Op2::Arguments op_2;
typename Op3::Arguments op_3;
};
struct Params {
typename Op0::Params op_0;
typename Op1::Params op_1;
typename Op2::Params op_2;
typename Op3::Params op_3;
};
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
size_t op_0_workspace_size = Op0::get_workspace_size(problem_shape, args.op_0);
size_t op_1_workspace_size = Op1::get_workspace_size(problem_shape, args.op_1);
size_t op_2_workspace_size = Op2::get_workspace_size(problem_shape, args.op_2);
uint8_t* op_0_workspace = reinterpret_cast<uint8_t*>(workspace);
uint8_t* op_1_workspace = op_0_workspace + op_0_workspace_size;
uint8_t* op_2_workspace = op_1_workspace + op_1_workspace_size;
uint8_t* op_3_workspace = op_2_workspace + op_2_workspace_size;
return Params{
Op0::to_underlying_arguments(problem_shape, args.op_0, op_0_workspace),
Op1::to_underlying_arguments(problem_shape, args.op_1, op_1_workspace),
Op2::to_underlying_arguments(problem_shape, args.op_2, op_2_workspace),
Op3::to_underlying_arguments(problem_shape, args.op_3, op_3_workspace)
};
}
template <class ProblemShape>
static bool
can_implement(ProblemShape const& problem_shape, Arguments const& args) {
return Op0::can_implement(problem_shape, args.op_0) &&
Op1::can_implement(problem_shape, args.op_1) &&
Op2::can_implement(problem_shape, args.op_2) &&
Op3::can_implement(problem_shape, args.op_3);
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
size_t workspace_size = 0;
workspace_size += Op0::get_workspace_size(problem_shape, args.op_0);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
workspace_size += Op1::get_workspace_size(problem_shape, args.op_1);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
workspace_size += Op2::get_workspace_size(problem_shape, args.op_2);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
workspace_size += Op3::get_workspace_size(problem_shape, args.op_3);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
return workspace_size;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
Status status = Status::kSuccess;
uint8_t* workspace_ptr = reinterpret_cast<uint8_t*>(workspace);
size_t workspace_offset = 0;
status = Op0::initialize_workspace(problem_shape, args.op_0, workspace_ptr + workspace_offset, stream, cuda_adapter);
workspace_offset += Op0::get_workspace_size(problem_shape, args.op_0);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
if (status != Status::kSuccess) {
return status;
}
status = Op1::initialize_workspace(problem_shape, args.op_1, workspace_ptr + workspace_offset, stream, cuda_adapter);
workspace_offset += Op1::get_workspace_size(problem_shape, args.op_1);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
if (status != Status::kSuccess) {
return status;
}
status = Op2::initialize_workspace(problem_shape, args.op_2, workspace_ptr + workspace_offset, stream, cuda_adapter);
workspace_offset += Op2::get_workspace_size(problem_shape, args.op_2);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
if (status != Status::kSuccess) {
return status;
}
status = Op3::initialize_workspace(problem_shape, args.op_3, workspace_ptr + workspace_offset, stream, cuda_adapter);
workspace_offset += Op3::get_workspace_size(problem_shape, args.op_3);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
if (status != Status::kSuccess) {
return status;
}
return status;
}
CUTLASS_HOST_DEVICE
Sm90VisitorImplBase() {}
CUTLASS_HOST_DEVICE
Sm90VisitorImplBase(Params const& params, SharedStorage const& shared_storage)
: ops({
Op0(params.op_0, get<0>(shared_storage)),
Op1(params.op_1, get<1>(shared_storage)),
Op2(params.op_2, get<2>(shared_storage)),
Op3(params.op_3, get<3>(shared_storage))
}) {}
tuple<Op0, Op1, Op2, Op3> ops;
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::epilogue::fusion
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/fusion/sm90_visitor_tma_warpspecialized.hpp/0 | {
"file_path": "include/cutlass/epilogue/fusion/sm90_visitor_tma_warpspecialized.hpp",
"repo_id": "include",
"token_count": 15492
} | 20 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination operations on planar-complex arrays
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/complex.h"
#include "cutlass/array_planar_complex.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/scale_type.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to arrays of planar-complex elements.
///
/// D = alpha * accumulator + beta * source + uniform
///
/// Note, as with most CUTLASS components for planar complex, the template arguments describe
/// the underlying real data type.
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
///< Usually it is 128/sizeof_bits<ElementOutput_>,
///< but 64 or 32 is sometimes used when there is not enough data to store
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest,
ScaleType::Kind Scale = ScaleType::Default ///< Control Alpha and Beta scaling
>
class LinearCombinationPlanarComplex {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using ElementScalar = complex<ElementCompute>;
static int const kCount = Count;
static const ScaleType::Kind kScale = Scale;
using FragmentOutput = ArrayPlanarComplex<ElementOutput, kCount>;
using FragmentAccumulator = ArrayPlanarComplex<ElementAccumulator, kCount>;
using ComputeFragment = ArrayPlanarComplex<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
/// Host-constructable parameters structure
struct Params {
ElementScalar alpha{ElementCompute(1)}; ///< scales accumulators
ElementScalar beta{ElementCompute(0)}; ///< scales source tensor
ElementScalar const* alpha_ptr{nullptr}; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementScalar const* beta_ptr{nullptr}; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
Params() = default;
CUTLASS_HOST_DEVICE
Params(
ElementScalar alpha,
ElementScalar beta
): alpha(alpha), beta(beta)
{}
CUTLASS_HOST_DEVICE
Params(
ElementScalar const *alpha_ptr,
ElementScalar const *beta_ptr
): alpha_ptr(alpha_ptr), beta_ptr(beta_ptr)
{}
};
private:
//
// Data members
//
ElementScalar alpha_;
ElementScalar beta_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationPlanarComplex(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::OnlyAlphaScaling) return false;
return beta_.real() != ElementCompute(0) || beta_.imag() != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source) const {
// Convert source and accumulator to the internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_source{
source_converter(source.real),
source_converter(source.imag)};
ComputeFragment converted_accumulator{
accumulator_converter(accumulator.real),
accumulator_converter(accumulator.imag)};
multiplies<Array<ElementCompute, kCount> > mul_op;
multiply_add<Array<ElementCompute, kCount> > mul_add_op;
// Perform binary operations
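    // The planar-complex product is expanded into real-valued FMAs using
    //   (a + bi) * (c + di) = (a*c - b*d) + (a*d + b*c)i
    // applied first with beta to the source and then with alpha to the accumulator.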
// complex multiply: I = beta * C
ComputeFragment intermediate {
mul_op(beta_.real(), converted_source.real),
mul_op(beta_.real(), converted_source.imag)
};
intermediate.real = mul_add_op(-beta_.imag(), converted_source.imag, intermediate.real);
intermediate.imag = mul_add_op( beta_.imag(), converted_source.real, intermediate.imag);
// complex multiply-add: I = alpha * AB + I
intermediate.real = mul_add_op(alpha_.real(), converted_accumulator.real, intermediate.real);
intermediate.imag = mul_add_op(alpha_.real(), converted_accumulator.imag, intermediate.imag);
intermediate.real = mul_add_op(-alpha_.imag(), converted_accumulator.imag, intermediate.real);
intermediate.imag = mul_add_op( alpha_.imag(), converted_accumulator.real, intermediate.imag);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return FragmentOutput{
destination_converter(intermediate.real),
destination_converter(intermediate.imag)};
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator) const {
// Convert accumulator to the internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_accumulator{
accumulator_converter(accumulator.real),
accumulator_converter(accumulator.imag)};
// Perform binary operations
multiplies<Array<ElementCompute, kCount> > mul_op;
multiply_add<Array<ElementCompute, kCount> > mul_add_op;
// complex multiply-add: I = alpha * AB + I
ComputeFragment intermediate {
mul_op(alpha_.real(), converted_accumulator.real),
mul_op(alpha_.real(), converted_accumulator.imag)
};
intermediate.real = mul_add_op(-alpha_.imag(), converted_accumulator.imag, intermediate.real);
intermediate.imag = mul_add_op( alpha_.imag(), converted_accumulator.real, intermediate.imag);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return FragmentOutput{
destination_converter(intermediate.real),
destination_converter(intermediate.imag)};
}
};
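/// Illustrative host-side usage sketch (not part of the library). The values are arbitrary and
/// the function exists only so the statements are well-formed at this point in the header.
inline void linear_combination_planar_complex_example() {
  using Op = LinearCombinationPlanarComplex<float, 4>;
  // alpha = 1.5 + 0.5i, beta = 0
  Op::Params params(Op::ElementScalar(1.5f, 0.5f),
                    Op::ElementScalar(0.0f));
  Op op(params);
  Op::FragmentAccumulator acc;
  acc.real.fill(2.0f);
  acc.imag.fill(1.0f);
  // Source-free path: D = alpha * acc
  Op::FragmentOutput d = op(acc);
  (void)d;
}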
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/thread/linear_combination_planar_complex.h/0 | {
"file_path": "include/cutlass/epilogue/thread/linear_combination_planar_complex.h",
"repo_id": "include",
"token_count": 2942
} | 21 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"
#include "cutlass/epilogue/thread/linear_combination_sigmoid.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h"
#include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_blas3.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator_mixed.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/threadblock/interleaved_epilogue.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess,
/// Indicates whether the kernel is a symmetric (BLAS3) kernel
BlasMode BlasMode_ = BlasMode::kGemm
>
struct DefaultEpilogueTensorOpBlas3 {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
static BlasMode const kBlasMode = BlasMode_;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorBlas3<
OutputTileThreadMap,
ElementOutput,
kBlasMode
>;
using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value,
cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC>,
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC> >::type;
/// Support several implementations depending on structure of epilogue
using DefaultIterators = detail::DefaultIteratorsTensorOp<
ElementOutput,
ElementAccumulator,
kElementsPerAccess,
Shape,
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename OutputTileThreadMap::CompactedThreadMap
>;
using WarpTileIterator = typename DefaultIterators::WarpTileIterator;
using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>;
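  // For 32-bit accumulators this evaluates to MatrixShape<0, 8>, i.e. eight padding elements
  // appended to each row of the shared-memory staging tile.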
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/default_epilogue_tensor_op_blas3.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/default_epilogue_tensor_op_blas3.h",
"repo_id": "include",
"token_count": 2492
} | 22 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/numeric_types.h"
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator
template <
typename ElementAccumulator_,
typename ElementOutput_,
typename ThreadBlockShape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
bool ReduceKForA_
>
class EpilogueGemmKReduction {
public:
using ThreadBlockShape = ThreadBlockShape_;
using WarpMmaOperator = WarpMmaOperator_;
using WarpShape = typename WarpMmaOperator::Shape;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// Accumulator element
using ElementAccumulator = ElementAccumulator_;
/// Output element
using ElementOutput = ElementOutput_;
/// Output access size
static int const kElementsPerAccess = 1;
static bool const kReduceKForA = ReduceKForA_;
static int const kThreadBlockSize = kReduceKForA ? ThreadBlockShape::kM : ThreadBlockShape::kN;
static int const kWarpSize = kReduceKForA ? WarpShape::kM : WarpShape::kN;
static int const kIterations = kWarpSize / 8;
using FragmentAccumulator = Array<ElementAccumulator, kIterations>;
private:
int thread_offset_;
ElementOutput* pointer_;
int col_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueGemmKReduction(
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx, ///< Id of thread within warp
int threadblock_offset,
ElementOutput* pointer
)
{
col_ = lane_idx % 4;
thread_offset_ = threadblock_offset * kThreadBlockSize
+ warp_idx * kWarpSize
+ lane_idx / 4 + col_ * 8;
pointer_ = pointer + LongIndex(thread_offset_);
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
int size,
FragmentAccumulator &gemm_k_with_reduction_accumulation,
bool LoadForSerialSplitK
) {
bool guard[kIterations / 4];
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations / 4; ++i) {
guard[i] = ((thread_offset_ + i * 32) < size);
}
Array<ElementOutput, kIterations / 4> source;
source.clear();
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations / 4; ++i) {
ElementOutput *source_ptr = reinterpret_cast<ElementOutput *>(&source);
cutlass::arch::global_load<ElementOutput, sizeof(ElementOutput)>(
source_ptr[i],
(void *)(pointer_ + i * 32),
guard[i] && LoadForSerialSplitK);
}
FragmentAccumulator sum = gemm_k_with_reduction_accumulation;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations; ++i) {
sum[i] += __shfl_xor_sync(0xffffffff, sum[i], 1);
sum[i] += __shfl_xor_sync(0xffffffff, sum[i], 2);
}
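    // The two xor-shuffles form a butterfly across each quad of lanes (lane ^ 1, then lane ^ 2):
    // after them, all four lanes of a quad hold the same per-element sum of the quad's values.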
Array<ElementAccumulator, kIterations / 4> intermediate;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations / 4; ++i) {
if (col_ == 0) {
intermediate[i] = sum[0 + i * 4];
}
if (col_ == 1) {
intermediate[i] = sum[1 + i * 4];
}
if (col_ == 2) {
intermediate[i] = sum[2 + i * 4];
}
if (col_ == 3) {
intermediate[i] = sum[3 + i * 4];
}
}
NumericArrayConverter<ElementAccumulator, ElementOutput, kIterations / 4> source_converter;
Array<ElementAccumulator, kIterations / 4> converted_source = source_converter(source);
plus<Array<ElementAccumulator, kIterations / 4>> plus_source;
intermediate = plus_source(intermediate, converted_source);
NumericArrayConverter<ElementOutput, ElementAccumulator, kIterations / 4> converter;
Array<ElementOutput, kIterations / 4> result = converter(intermediate);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations / 4; ++i) {
cutlass::arch::global_store<ElementOutput, sizeof(ElementOutput)>(result[i],
(void *)(pointer_ + i * 32), guard[i]);
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/epilogue_gemm_k_reduction.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/epilogue_gemm_k_reduction.h",
"repo_id": "include",
"token_count": 2738
} | 23 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base_streamk.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator without splitk
template <
/// Shape of threadblock tile (concept: GemmShape)
typename Shape_,
/// Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
typename WarpMmaOperator_,
/// Number of partitions of the K dimension
int PartitionsK,
/// Tile iterator reading and writing output tensors
typename OutputTileIterator_,
/// Fragment iterator selecting accumulators
typename AccumulatorFragmentIterator_,
/// Output operator
typename OutputOp_,
/// Number of interleaved k
int InterleavedK>
class InterleavedEpilogue :
public EpilogueBaseStreamK<
Shape_,
PartitionsK,
WarpMmaOperator_,
AccumulatorFragmentIterator_>
{
public:
using BaseStreamK = EpilogueBaseStreamK<
Shape_,
PartitionsK,
WarpMmaOperator_,
AccumulatorFragmentIterator_>;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using OutputTileIterator = OutputTileIterator_;
using OutputOp = OutputOp_;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile;
/// Fragment type used by the accumulator tile's fragment iterator
using AccumulatorFragment = typename AccumulatorFragmentIterator::Fragment;
/// Accumulator element
using ElementAccumulator = typename AccumulatorTile::Element;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef =
typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<typename OutputTileIterator::Element,
OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType =
Array<ElementAccumulator, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount =
gemm::GemmShape<Shape::kM / WarpMmaOperator::Shape::kM,
Shape::kN / WarpMmaOperator::Shape::kN, kPartitionsK>;
public:
  static_assert(OutputTileIterator::kElementsPerAccess,
    "OutputTileIterator::kElementsPerAccess must not be zero.");
  static_assert(!(OutputTileIterator::Fragment::kElements %
                  OutputTileIterator::kElementsPerAccess),
    "OutputTileIterator::Fragment::kElements must be divisible by OutputTileIterator::kElementsPerAccess.");
public:
/// Aspect for when epilogue source is not needed
struct SourceAspectNotNeeded
{
/// Constructor
CUTLASS_DEVICE
SourceAspectNotNeeded()
{}
/// Invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator(
typename OutputTileIterator::Fragment &output_fragment,
OutputOp const &output_op,
typename AccumulatorFragmentIterator::Fragment const &aligned_accum_fragment)
{
OutputAccessType *output_frag_ptr =
reinterpret_cast<OutputAccessType *>(&output_fragment);
AccumulatorAccessType const *compute_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i)
{
// Call the output operator
output_frag_ptr[i] = output_op(compute_frag_ptr[i]);
}
}
};
/// Aspect for when epilogue source is needed
struct SourceAspectNeeded
{
OutputTileIterator source_iterator;
typename OutputTileIterator::Fragment source_fragment;
/// Invoke the output functor over each vector of output
CUTLASS_DEVICE
static void apply_output_operator(
typename OutputTileIterator::Fragment &output_fragment,
OutputOp const &output_op,
typename AccumulatorFragmentIterator::Fragment const &aligned_accum_fragment,
typename OutputTileIterator::Fragment const &source_fragment)
{
OutputAccessType *output_frag_ptr =
reinterpret_cast<OutputAccessType *>(&output_fragment);
AccumulatorAccessType const *compute_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment);
OutputAccessType const *source_frag_ptr =
reinterpret_cast<OutputAccessType const *>(&source_fragment);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i)
{
// Call the output operator
output_frag_ptr[i] = output_op(compute_frag_ptr[i], source_frag_ptr[i]);
}
}
/// Constructor
CUTLASS_DEVICE
SourceAspectNeeded(OutputTileIterator source_iterator) :
source_iterator(source_iterator)
{
source_fragment.clear();
}
/// Invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator(
typename OutputTileIterator::Fragment &output_fragment,
OutputOp const &output_op,
typename AccumulatorFragmentIterator::Fragment const &aligned_accum_fragment)
{
// Load addend source fragment from global memory
source_iterator.load(source_fragment);
++source_iterator;
apply_output_operator(output_fragment, output_op, aligned_accum_fragment, source_fragment);
}
};
/// Shared storage allocation needed by the epilogue
struct SharedStorage {};
public:
/// Constructor
CUTLASS_DEVICE
InterleavedEpilogue(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx) ///< Id of thread within warp
:
BaseStreamK(thread_idx)
{}
/// Aggregates the accumulator sets shared by peer blocks in the global workspace,
/// performing epilogue computations, writing to output
CUTLASS_DEVICE
void reduce(
int peer_idx_begin,
int peer_idx_end,
int reduce_fragment_idx,
void *element_workspace,
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
    OutputTileIterator source_iterator)        ///< Tile iterator for addend source
{
    // Reduce peer accumulator fragments into one fragment
AccumulatorFragment accum_fragment;
BaseStreamK::reduce(accum_fragment, peer_idx_begin, peer_idx_end, reduce_fragment_idx, element_workspace);
// Source-fragment data (zero-initialized for scenarios where the
// output operator allows us to skip loading it from global input)
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
if (output_op.is_source_needed())
{
source_iterator += reduce_fragment_idx;
source_iterator.load(source_fragment);
}
// Compute the output result
typename OutputTileIterator::Fragment output_fragment;
// Apply the output operator
SourceAspectNeeded::apply_output_operator(output_fragment, output_op, accum_fragment, source_fragment);
// Store the final result
destination_iterator += reduce_fragment_idx;
destination_iterator.store(output_fragment);
}
/// Perform the epilogue computations and stream the result to global memory.
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators) ///< Complete warp-level accumulator tile
{
operator()(output_op, destination_iterator, accumulators, SourceAspectNotNeeded());
}
/// Perform the epilogue computations and stream the result to global memory. Implements
/// two alternative codepaths, depending on whether the output op requires addend data to be loaded.
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator ) ///< Tile iterator for addend source
{
if (output_op.is_source_needed())
{
operator()(output_op, destination_iterator, accumulators, SourceAspectNeeded(source_iterator));
}
else
{
operator()(output_op, destination_iterator, accumulators, SourceAspectNotNeeded());
}
}
/// Perform the epilogue computations and stream the result to global memory. Implements a
/// single codepath, regardless of whether the output op requires addend data to be loaded
CUTLASS_DEVICE
void unified(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator ) ///< Tile iterator for addend source
{
if (!output_op.is_source_needed())
{
source_iterator.clear_mask();
      __syncthreads();  // Dummy barrier (workaround for CUDA 11.0)
}
operator()(output_op, destination_iterator, accumulators, SourceAspectNeeded(source_iterator));
}
/// Streams the result to global memory
template <typename SourceAspect>
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
SourceAspect source)
{
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Convert fragment
//
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
//
// Compute the output result
//
typename OutputTileIterator::Fragment output_fragment;
source.apply_output_operator(output_fragment, output_op, accum_fragment);
//
// Store the final result
//
destination_iterator.set_iteration_index(iter);
destination_iterator.store(output_fragment);
++destination_iterator;
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
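//
// Illustrative usage sketch (not part of the original header): how a GEMM kernel typically
// drives this epilogue once the mainloop has produced a warp-level accumulator tile.
// `Epilogue` stands for a concrete InterleavedEpilogue instantiation; the iterators and
// output operator are assumed to have been constructed by the surrounding kernel.
//
//   __shared__ typename Epilogue::SharedStorage epilogue_storage;
//   Epilogue epilogue(epilogue_storage, thread_idx, warp_idx, lane_idx);
//
//   // Source-free path (e.g. beta == 0):
//   epilogue(output_op, iterator_D, accumulators);
//
//   // Path with an addend tensor C; the epilogue dispatches internally on
//   // output_op.is_source_needed():
//   epilogue(output_op, iterator_D, accumulators, iterator_C);
//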
| include/cutlass/epilogue/threadblock/interleaved_epilogue.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/interleaved_epilogue.h",
"repo_id": "include",
"token_count": 4850
} | 24 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Defines categories for floating point numbers for use in NVRTC-compiled code
*/
#pragma once
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
// All floating-point numbers can be put in one of these categories.
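// Each enumerator below is paired with a #define so that the FP_* names exist both as enum
// values and as preprocessor macros (mirroring <math.h>, which NVRTC does not provide).
// The token that follows each directive expands to the macro's value, giving
// FP_NAN=0, FP_INFINITE=1, FP_ZERO=2, FP_SUBNORMAL=3, FP_NORMAL=4.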
enum {
FP_NAN =
# define FP_NAN 0
FP_NAN,
FP_INFINITE =
# define FP_INFINITE 1
FP_INFINITE,
FP_ZERO =
# define FP_ZERO 2
FP_ZERO,
FP_SUBNORMAL =
# define FP_SUBNORMAL 3
FP_SUBNORMAL,
FP_NORMAL =
# define FP_NORMAL 4
FP_NORMAL
};
CUTLASS_HOST_DEVICE
int fpclassify(float const& f) {
uint32_t s;
#if defined(__CUDA_ARCH__)
s = reinterpret_cast<uint32_t const &>(f);
#else
std::memcpy(&s, &f, sizeof(s));
#endif
uint32_t exp = s & 0x7f800000;
uint32_t mantissa = s & 0x007fffff;
if (exp == 0x7f800000) {
if (mantissa) {
return FP_NAN;
}
else {
return FP_INFINITE;
}
}
else if (!exp) {
if (mantissa) {
return FP_SUBNORMAL;
}
else {
return FP_ZERO;
}
}
return FP_NORMAL;
}
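// Worked example (illustrative, not part of the original header): for f = 1.0f the bit pattern
// is 0x3F800000, so exp is nonzero and != 0x7F800000 -> FP_NORMAL. For a NaN such as 0x7FC00000,
// exp == 0x7F800000 with a nonzero mantissa -> FP_NAN; 0x7F800000 itself (mantissa == 0)
// -> FP_INFINITE; and 0x00000001 (exp == 0, mantissa != 0) -> FP_SUBNORMAL.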
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/floating_point_nvrtc.h/0 | {
"file_path": "include/cutlass/floating_point_nvrtc.h",
"repo_id": "include",
"token_count": 971
} | 25 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/trace.h"
#include "cute/arch/cluster_sm90.hpp"
#include "cute/arch/copy_sm90.hpp"
#include "cute/algorithm/functional.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/algorithm/gemm.hpp"
#include "cute/tensor_predicate.hpp"
#include "cute/numeric/arithmetic_tuple.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::collective {
using namespace cute;
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
int Stages,
class ClusterShape,
int PipelineAsyncMmaStages,
class TileShape_,
class ElementA_,
class StrideA_,
class ElementB_,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_>
struct CollectiveMma<
MainloopSm90TmaGmma<Stages, ClusterShape, PipelineAsyncMmaStages>,
TileShape_,
ElementA_,
StrideA_,
ElementB_,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_>
{
//
// Type Aliases
//
using DispatchPolicy = MainloopSm90TmaGmma<Stages, ClusterShape, PipelineAsyncMmaStages>;
using TileShape = TileShape_;
using ElementA = ElementA_;
using StrideA = StrideA_;
using ElementB = ElementB_;
using StrideB = StrideB_;
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
using ArchTag = typename DispatchPolicy::ArchTag;
using CtaShape_MNK = decltype(shape_div(TileShape{}, ClusterShape{}));
using MainloopPipeline = cutlass::PipelineTmaAsync<DispatchPolicy::Stages>;
using PipelineParams = typename MainloopPipeline::Params;
using PipelineState = typename cutlass::PipelineState<DispatchPolicy::Stages>;
static constexpr int ThreadCount = CUTE_STATIC_V(size(TiledMma{}));
static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
// Tile along modes in a way that maximizes the TMA box size.
using SmemLayoutA = decltype(tile_to_shape(
SmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}),
cute::conditional_t< ::cutlass::gemm::detail::is_major<0,StrideA>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{}));
using SmemLayoutB = decltype(tile_to_shape(
SmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}),
cute::conditional_t< ::cutlass::gemm::detail::is_major<0,StrideB>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{}));
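  // Illustrative example (assumed tile sizes, not mandated by this header): with
  // TileShape = (128,256,64) and Stages = 4, SmemLayoutA is SmemLayoutAtomA tiled to a
  // (128,64,4) shape and SmemLayoutB to a (256,64,4) shape; the Step<> permutation selects the
  // tiling order according to which mode is the major (contiguous) one in global memory, so the
  // TMA box stays contiguous in shared memory.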
  static_assert(DispatchPolicy::Stages >= 2, "Specialization requires Stages set to value 2 or more.");
static_assert(cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeA>::value &&
cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeB>::value,
"MMA atom must source both A and B operand from smem_desc for this mainloop.");
static_assert(cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD> || cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_MULTICAST>,
"GmemTiledCopy - invalid SM90 TMA copy atom specified.");
static_assert(cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD> || cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_MULTICAST>,
"GmemTiledCopy - invalid SM90 TMA copy atom specified.");
// TMA converts f32 input to tf32 when copying from GMEM to SMEM
// For all other types, cast to size equivalent uint type to avoid any rounding by TMA.
static constexpr bool ConvertF32toTF32A = cute::is_same_v<float, ElementA>;
static constexpr bool ConvertF32toTF32B = cute::is_same_v<float, ElementB>;
using InternalElementA = cute::conditional_t<ConvertF32toTF32A, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementA>>>;
using InternalElementB = cute::conditional_t<ConvertF32toTF32B, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementB>>>;
struct SharedStorage {
cute::array_aligned<typename TiledMma::ValTypeA, cute::cosize_v<SmemLayoutA>> smem_A;
cute::array_aligned<typename TiledMma::ValTypeB, cute::cosize_v<SmemLayoutB>> smem_B;
using PipelineStorage = typename MainloopPipeline::SharedStorage;
alignas(16) PipelineStorage pipeline_storage;
};
// Host side kernel arguments
struct Arguments {
ElementA const* ptr_A;
StrideA dA;
ElementB const* ptr_B;
StrideB dB;
uint32_t mma_promotion_interval = 4;
};
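  // Illustrative sketch (hypothetical pointers and strides, not part of the original header):
  // the host fills Arguments with raw device pointers and CuTe strides, e.g.
  //
  //   ElementA const* d_A = ...;   // device allocation covering M x K x L
  //   ElementB const* d_B = ...;   // device allocation covering N x K x L
  //   StrideA stride_A = cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(M, K, L));
  //   StrideB stride_B = cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(N, K, L));
  //   Arguments mainloop_args{d_A, stride_A, d_B, stride_B};
  //
  // to_underlying_arguments() below then builds the TMA descriptors from these.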
// Device side kernel params
struct Params {
// Assumption: StrideA is congruent with Problem_MK
using TMA_A = decltype(make_tma_copy(
GmemTiledCopyA{},
make_tensor(static_cast<InternalElementA const*>(nullptr), repeat_like(StrideA{}, int32_t(0)), StrideA{}),
SmemLayoutA{}(_,_,0),
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{})),
size<1>(ClusterShape{}))); // mcast along N mode for this M load, if any
// Assumption: StrideB is congruent with Problem_NK
using TMA_B = decltype(make_tma_copy(
GmemTiledCopyB{},
make_tensor(static_cast<InternalElementB const*>(nullptr), repeat_like(StrideB{}, int32_t(0)), StrideB{}),
SmemLayoutB{}(_,_,0),
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{})),
size<0>(ClusterShape{}))); // mcast along M mode for this N load, if any
TMA_A tma_load_a;
TMA_B tma_load_b;
};
//
// Methods
//
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
(void) workspace;
// Optionally append 1s until problem shape is rank-4 (MNKL), in case it is only rank-3 (MNK)
auto problem_shape_MNKL = append<4>(problem_shape, 1);
auto [M,N,K,L] = problem_shape_MNKL;
auto ptr_A = reinterpret_cast<InternalElementA const*>(args.ptr_A);
auto ptr_B = reinterpret_cast<InternalElementB const*>(args.ptr_B);
Tensor tensor_a = make_tensor(ptr_A, make_layout(make_shape(M,K,L), args.dA));
Tensor tensor_b = make_tensor(ptr_B, make_layout(make_shape(N,K,L), args.dB));
typename Params::TMA_A tma_load_a = make_tma_copy(
GmemTiledCopyA{},
tensor_a,
SmemLayoutA{}(_,_,cute::Int<0>{}),
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{})),
size<1>(ClusterShape{})); // mcast along N mode for this M load, if any
typename Params::TMA_B tma_load_b = make_tma_copy(
GmemTiledCopyB{},
tensor_b,
SmemLayoutB{}(_,_,cute::Int<0>{}),
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{})),
size<0>(ClusterShape{})); // mcast along M mode for this N load, if any
return {
tma_load_a,
tma_load_b
};
}
template<class ProblemShape>
static bool
can_implement(
ProblemShape const& problem_shape,
[[maybe_unused]] Arguments const& args) {
constexpr int tma_alignment_bits = 128;
auto problem_shape_MNKL = append<4>(problem_shape, 1);
auto [M,N,K,L] = problem_shape_MNKL;
bool implementable = true;
constexpr int min_tma_aligned_elements_A = tma_alignment_bits / cutlass::sizeof_bits<ElementA>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_A>(cute::make_shape(M,K,L), StrideA{});
constexpr int min_tma_aligned_elements_B = tma_alignment_bits / cutlass::sizeof_bits<ElementB>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_B>(cute::make_shape(N,K,L), StrideB{});
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Problem Size doesn't meet the minimum alignment requirements for TMA.\n");
}
return implementable;
}
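  // Worked example (illustrative): with 16-bit operands (e.g. half_t), min_tma_aligned_elements
  // is 128 / 16 = 8, so every non-unit stride of A and B must be a multiple of 8 elements;
  // with 32-bit operands (float/tf32) the requirement drops to 128 / 32 = 4 elements.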
/// Issue Tma Descriptor Prefetch -- ideally from a single thread for best performance
CUTLASS_DEVICE
static void prefetch_tma_descriptors(Params const& mainloop_params) {
cute::prefetch_tma_descriptor(mainloop_params.tma_load_a.get_tma_descriptor());
cute::prefetch_tma_descriptor(mainloop_params.tma_load_b.get_tma_descriptor());
}
/// Perform a collective-scoped matrix multiply-accumulate
/// Producer Perspective
template <
class TensorA, class TMA_LOAD_A,
class TensorB, class TMA_LOAD_B,
class FrgTensorC,
class KTileIterator
>
CUTLASS_DEVICE void
operator() (
TensorA const& gA, TMA_LOAD_A& tma_load_a,
TensorB const& gB, TMA_LOAD_B& tma_load_b,
FrgTensorC& accum,
KTileIterator k_tile_iter, int k_tile_count,
int thread_idx,
uint32_t block_rank_in_cluster,
char* shared_memory,
Params const& mainloop_params)
{
using namespace cute;
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2.");
static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2.");
static_assert(cute::rank(SmemLayoutA{}) == 3, "Smem layout must be rank 3.");
static_assert(cute::rank(SmemLayoutB{}) == 3, "Smem layout must be rank 3.");
static_assert(cute::is_void_v<SmemCopyAtomA>,
"SM90 GMMA mainloops cannot have a non-void copy atom for smem sourced instructions.");
static_assert(cute::is_void_v<SmemCopyAtomB>,
"SM90 GMMA mainloops cannot have a non-void copy atom for smem sourced instructions.");
SharedStorage& storage = *reinterpret_cast<SharedStorage*>(shared_memory);
Tensor sA = make_tensor(make_smem_ptr(storage.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sB = make_tensor(make_smem_ptr(storage.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
//
// Prepare the TMA loads for A and B
//
constexpr uint32_t cluster_shape_x = get<0>(ClusterShape());
uint2 cluster_local_block_id = {block_rank_in_cluster % cluster_shape_x, block_rank_in_cluster / cluster_shape_x};
auto block_tma_a = tma_load_a.get_slice(cluster_local_block_id.y);
auto block_tma_b = tma_load_b.get_slice(cluster_local_block_id.x);
// Applies the mapping from block_tma_a
Tensor tAgA = block_tma_a.partition_S(gA); // (TMA,TMA_M,TMA_K,k)
Tensor tAsA = block_tma_a.partition_D(sA); // (TMA,TMA_M,TMA_K,PIPE)
Tensor tBgB = block_tma_b.partition_S(gB); // (TMA,TMA_N,TMA_K,k)
Tensor tBsB = block_tma_b.partition_D(sB); // (TMA,TMA_N,TMA_K,PIPE)
//
// Prepare TMA membars and PREFETCH
//
// Number of pipelined k-tiles in smem
constexpr int K_PIPE_MAX = DispatchPolicy::Stages;
    // NOTE: Another tuning parameter: partition the pipeline stages between in-flight MMAs and in-flight TMAs.
    // Tunable via the dispatch policy to spread latency tolerance across the math (MMA) and copy (TMA) stages.
    // K_PIPE_MMAS: The max number of active MMA pipes at the beginning of every loop
    // K_PIPE_TMAS: The max number of active TMA pipes at the beginning of every loop (geq 1)
constexpr int K_PIPE_MMAS = DispatchPolicy::PipelineAsyncMmaStages;
constexpr int K_PIPE_TMAS = K_PIPE_MAX - K_PIPE_MMAS;
static_assert(0 <= K_PIPE_MMAS && K_PIPE_MMAS < K_PIPE_MAX);
static_assert(0 < K_PIPE_TMAS && K_PIPE_TMAS <= K_PIPE_MAX);
static_assert(K_PIPE_MMAS < K_PIPE_MAX - 1);
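    // Worked example (illustrative): with Stages = 7 and PipelineAsyncMmaStages = 2,
    // K_PIPE_MAX = 7, K_PIPE_MMAS = 2 and K_PIPE_TMAS = 5, i.e. up to 2 MMAs and 5 TMA loads
    // may be in flight at the top of each mainloop iteration, satisfying the asserts above.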
// Set the bytes transferred in this TMA transaction (may involve multiple issues)
constexpr uint32_t TmaTransactionBytes = static_cast<uint32_t>(
cutlass::bits_to_bytes(size<0>(sA) * size<1>(sA) * sizeof_bits<InternalElementA>::value) +
cutlass::bits_to_bytes(size<0>(sB) * size<1>(sB) * sizeof_bits<InternalElementB>::value));
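    // Worked example (illustrative): for a 128x256x64 tile with 16-bit A and B, one stage carries
    // 128*64*2 = 16384 bytes of A plus 256*64*2 = 32768 bytes of B, so TmaTransactionBytes = 49152.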
// Obtain warp index
int warp_idx = canonical_warp_idx_sync();
int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup;
PipelineParams params;
params.transaction_bytes = TmaTransactionBytes;
params.role = MainloopPipeline::ThreadCategory::ProducerConsumer;
params.is_leader = warp_group_thread_idx == 0;
params.num_consumers = NumThreadsPerWarpGroup;
MainloopPipeline pipeline(storage.pipeline_storage, params, ClusterShape{});
// State variables used for iterating the circular buffer
// smem_pipe_read / release is used by the consumer of SMEM data - i.e MMA
// smem_pipe_write is used by the producer of SMEM data - i.e TMA
PipelineState smem_pipe_read;
PipelineState smem_pipe_release;
PipelineState smem_pipe_write = cutlass::make_producer_start_state<MainloopPipeline>();
// We need this to guarantee that the Pipeline init is visible
// To all producers and consumer blocks in the Cluster
if constexpr (size(ClusterShape{}) > 1) {
cute::cluster_arrive_relaxed();
cute::cluster_wait();
}
else {
__syncthreads();
}
// Set predicate for the lowest lane_id in the warp
int lane_predicate = cute::elect_one_sync();
uint16_t mcast_mask_a = 0;
uint16_t mcast_mask_b = 0;
// Keep a copy to know when to stop issuing loads
int k_tile_count_tma = k_tile_count;
// Issue TmaLoads (Prologue fetches)
if (warp_idx == 0 && lane_predicate == 1) {
      // Build the multicast masks: the cluster layout maps cluster-local (m,n) coordinates to block ids
if constexpr (cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_MULTICAST>) {
auto block_layout = Layout<typename DispatchPolicy::ClusterShape>{}; // (m,n) -> block_id
for (int n = 0; n < size<1>(block_layout); ++n) {
mcast_mask_a |= (uint16_t(1) << block_layout(cluster_local_block_id.x,n,Int<0>{}));
}
}
if constexpr (cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_MULTICAST>) {
auto block_layout = Layout<typename DispatchPolicy::ClusterShape>{}; // (m,n) -> block_id
for (int m = 0; m < size<0>(block_layout); ++m) {
mcast_mask_b |= (uint16_t(1) << block_layout(m,cluster_local_block_id.y,Int<0>{}));
}
}
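      // Worked example (illustrative): for ClusterShape = (2,4) with the default column-major
      // cluster layout (block_id = m + 2*n), a CTA with cluster_local_block_id.x == 1 multicasting
      // A along N sets bits {1,3,5,7}, i.e. mcast_mask_a == 0xAA; a CTA with
      // cluster_local_block_id.y == 2 multicasting B along M sets bits {4,5}, i.e. mcast_mask_b == 0x30.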
// Issue the prologue loads
int prologue_tma_count = min(K_PIPE_MAX, k_tile_count);
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < prologue_tma_count; ++stage) {
pipeline.producer_acquire(smem_pipe_write);
using BarrierType = typename MainloopPipeline::ProducerBarrierType;
BarrierType* tma_barrier = pipeline.producer_get_barrier(smem_pipe_write);
copy(tma_load_a.with(*tma_barrier, mcast_mask_a), tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,stage));
copy(tma_load_b.with(*tma_barrier, mcast_mask_b), tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,stage));
++k_tile_iter;
++smem_pipe_write;
}
k_tile_count_tma -= prologue_tma_count;
}
//
// Define C accumulators and A/B partitioning
//
// Layout of warp group to thread mapping
static_assert(stride<0>(typename TiledMma::ALayout{}) == 0 and
stride<0>(typename TiledMma::BLayout{}) == 0 and
size<0>(typename TiledMma::ALayout{}) == NumThreadsPerWarpGroup and
size<0>(typename TiledMma::BLayout{}) == NumThreadsPerWarpGroup,
"Stride of the first mode must be 0 and the size of the mode must be NumThreadsPerWarpGroup");
constexpr int MmaWarpGroups = size(TiledMma{}) / NumThreadsPerWarpGroup;
Layout warp_group_thread_layout = make_layout(Int<MmaWarpGroups>{},
Int<NumThreadsPerWarpGroup>{});
int warp_group_idx = __shfl_sync(0xFFFFFFFF, thread_idx / NumThreadsPerWarpGroup, 0);
TiledMma tiled_mma;
auto thread_mma = tiled_mma.get_slice(warp_group_thread_layout(warp_group_idx));
Tensor tCsA = thread_mma.partition_A(sA); // (MMA,MMA_M,MMA_K,PIPE)
Tensor tCsB = thread_mma.partition_B(sB); // (MMA,MMA_N,MMA_K,PIPE)
// Allocate "fragments/descriptors"
Tensor tCrA = thread_mma.make_fragment_A(tCsA); // (MMA,MMA_M,MMA_K,PIPE)
Tensor tCrB = thread_mma.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K,PIPE)
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(accum)); // M
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<2>(accum)); // N
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // K
CUTE_STATIC_ASSERT_V(size<3>(tCsA) == size<3>(tCsB)); // PIPE
CUTE_STATIC_ASSERT_V(size<3>(tCsA) == size<3>(tAsA)); // PIPE
CUTE_STATIC_ASSERT_V(size<3>(tCsB) == size<3>(tBsB)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE
__syncthreads();
tiled_mma.accumulate_ = GMMA::ScaleOut::Zero;
warpgroup_fence_operand(accum);
// Prologue MMAs
assert(k_tile_count >= 1);
{
      // WAIT on smem_pipe_read until its data is available
pipeline.consumer_wait(smem_pipe_read);
warpgroup_arrive();
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < size<2>(tCrA); ++k_block) {
// (V,M,K) x (V,N,K) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,k_block,smem_pipe_read.index()), tCrB(_,_,k_block,smem_pipe_read.index()), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
}
warpgroup_commit_batch();
++smem_pipe_read;
--k_tile_count;
}
CUTLASS_PRAGMA_UNROLL
for (int prologue_mma_count = min(K_PIPE_MMAS, k_tile_count) - 1;
prologue_mma_count > 0; --prologue_mma_count)
{
      // WAIT on smem_pipe_read until its data is available
pipeline.consumer_wait(smem_pipe_read);
warpgroup_arrive();
// (V,M,K) x (V,N,K) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,_,smem_pipe_read.index()), tCrB(_,_,_,smem_pipe_read.index()), accum);
warpgroup_commit_batch();
++smem_pipe_read;
--k_tile_count;
}
warpgroup_fence_operand(accum);
//
// PIPELINED MAIN LOOP
//
CUTLASS_PRAGMA_NO_UNROLL
for ( ; k_tile_count > 0; --k_tile_count)
{
// WAIT on smem_pipe_read until data is available
pipeline.consumer_wait(smem_pipe_read);
//
// Compute on k_tile
//
warpgroup_fence_operand(accum);
warpgroup_arrive();
// (V,M,K) x (V,N,K) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,_,smem_pipe_read.index()), tCrB(_,_,_,smem_pipe_read.index()), accum);
warpgroup_commit_batch();
/// Wait on the GMMA barrier for K_PIPE_MMAS (or fewer) outstanding to ensure smem_pipe_write is consumed
warpgroup_wait<K_PIPE_MMAS>();
warpgroup_fence_operand(accum);
pipeline.consumer_release(smem_pipe_release); // UNLOCK wr stage, done _computing_ on it
//
// Copy gmem to smem for *k_tile_iter
//
      // Perform the producer acquire & TMA load only while tiles remain - helps performance and
      // avoids corner-case illegal barrier operations
if (warp_idx == 0 && lane_predicate == 1 && (k_tile_count_tma > 0) ) {
pipeline.producer_acquire(smem_pipe_write); // LOCK wr stage, for _writing_
using BarrierType = typename MainloopPipeline::ProducerBarrierType;
BarrierType* tma_barrier = pipeline.producer_get_barrier(smem_pipe_write);
copy(tma_load_a.with(*tma_barrier, mcast_mask_a), tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,smem_pipe_write.index()));
copy(tma_load_b.with(*tma_barrier, mcast_mask_b), tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,smem_pipe_write.index()));
++smem_pipe_write;
++k_tile_iter;
--k_tile_count_tma;
}
// Advance consumer pipeline
++smem_pipe_read;
++smem_pipe_release;
}
// Wait on all GMMAs
warpgroup_wait<0>();
warpgroup_fence_operand(accum);
// Workaround for ensuring Smem destruction doesn't happen accidentally
if constexpr (size(typename DispatchPolicy::ClusterShape{}) > 1) {
cute::cluster_arrive();
cute::cluster_wait();
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::collective
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/collective/sm90_mma_tma_gmma_ss.hpp/0 | {
"file_path": "include/cutlass/gemm/collective/sm90_mma_tma_gmma_ss.hpp",
"repo_id": "include",
"token_count": 9902
} | 26 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/sparse_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_sparse_with_visitor.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/epilogue/threadblock/fusion/visitor_2x.hpp"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/*! Sparse GEMM with visitor
*/
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassSimt,
/// Tag indicating architecture to tune for
typename ArchTag_ = arch::Sm80,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename FusionCallbacks_ =
typename cutlass::epilogue::threadblock::detail::EmptyCallbacks,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ =
typename threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// Operation performed by GEMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator,
/// Number of stages used in the pipelined epilogue
int EpilogueStages = 1>
class SparseGemmWithVisitor {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using FusionCallbacks = FusionCallbacks_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
using MathOperator = Operator;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
/// Define the kernel
using GemmKernel = typename kernel::DefaultSparseGemmWithVisitor<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
FusionCallbacks,
ThreadblockSwizzle,
kStages,
Operator,
EpilogueStages
>::GemmKernel;
using ElementE = typename GemmKernel::ElementE;
using LayoutE = typename GemmKernel::LayoutE;
static int const kAlignmentE = 128 / sizeof_bits<ElementE>::value;
static int const kSparse = GemmKernel::kSparse;
static int const kMetaSizeInBits = GemmKernel::kMetaSizeInBits;
static int const kElementsPerElementE = GemmKernel::kElementsPerElementE;
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord problem_size;
TensorRef<ElementA const, LayoutA> ref_A;
TensorRef<ElementB const, LayoutB> ref_B;
TensorRef<ElementE const, LayoutE> ref_E;
typename FusionCallbacks::Arguments epilogue;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments(): problem_size(0, 0, 0) {
}
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord problem_size_,
TensorRef<ElementA const, LayoutA> ref_A_,
TensorRef<ElementB const, LayoutB> ref_B_,
TensorRef<ElementE, LayoutE> ref_E_,
typename FusionCallbacks::Arguments epilogue_ =
typename FusionCallbacks::Arguments()
):
problem_size(problem_size_),
ref_A(ref_A_),
ref_B(ref_B_),
ref_E(ref_E_),
epilogue(epilogue_) {
}
};
private:
/// Kernel parameters object
typename GemmKernel::Params params_;
public:
/// Constructs the GEMM.
SparseGemmWithVisitor() { }
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
Status status = GemmKernel::can_implement(
args.problem_size,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
cutlass::TensorRef<ElementC, LayoutC>(), // It only matters that it's empty.
cutlass::TensorRef<ElementC, LayoutC>(), // Same as above.
args.ref_E.non_const_ref()
);
if (status != Status::kSuccess) {
return status;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
return bytes;
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
constexpr int SplitKSlices = 1;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
SplitKSlices);
// Initialize the Params structure
params_ = typename GemmKernel::Params{
args.problem_size,
grid_shape,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
args.ref_E.non_const_ref(),
args.epilogue
};
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(Kernel<GemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
params_.ref_A.reset(args.ref_A.non_const_ref().data());
params_.ref_B.reset(args.ref_B.non_const_ref().data());
params_.ref_E.reset(args.ref_E.non_const_ref().data());
params_.output_op = args.epilogue;
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(GemmKernel::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
cutlass::Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
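//
// Illustrative usage sketch (hypothetical template arguments, not part of the original header):
//
//   using Gemm = cutlass::gemm::device::SparseGemmWithVisitor<
//       cutlass::half_t, cutlass::layout::RowMajor,     // A
//       cutlass::half_t, cutlass::layout::ColumnMajor,  // B
//       cutlass::half_t, cutlass::layout::RowMajor,     // C/D
//       float,                                          // accumulator
//       cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 128, 64>,         // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 64>,           // warp tile
//       cutlass::gemm::GemmShape<16, 8, 32>,            // sparse tensor op shape
//       MyFusionCallbacks>;                             // an EVT callback type built elsewhere
//
//   Gemm op;
//   Gemm::Arguments args(problem_size, ref_A, ref_B, ref_E, fusion_args);
//   if (Gemm::can_implement(args) == cutlass::Status::kSuccess) {
//     op(args);   // initialize + run on the default stream
//   }
//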
| include/cutlass/gemm/device/gemm_sparse_with_visitor.h/0 | {
"file_path": "include/cutlass/gemm/device/gemm_sparse_with_visitor.h",
"repo_id": "include",
"token_count": 3980
} | 27 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmPlanarComplexArray {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
using Operator = typename Mma::Operator;
using ArchTag = typename Mma::ArchTag;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(
128 / sizeof_bits<ElementA>::value,
128 / sizeof_bits<ElementB>::value);
//
// Additional types needed for reflection
//
using ElementAccumulator = typename Mma::Policy::Operator::ElementC;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::Shape;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
//
// Arguments structure
//
/// Argument structure
struct Arguments : UniversalArgumentsBase
{
//
// Data members
//
typename EpilogueOutputOp::Params epilogue{};
int const *ptr_M{nullptr};
int const *ptr_N{nullptr};
int const *ptr_K{nullptr};
void const * const * ptr_A_real{nullptr};
void const * const * ptr_A_imag{nullptr};
void const * const * ptr_B_real{nullptr};
void const * const * ptr_B_imag{nullptr};
void const * const * ptr_C_real{nullptr};
void const * const * ptr_C_imag{nullptr};
void * const * ptr_D_real{nullptr};
void * const * ptr_D_imag{nullptr};
typename LayoutA::Stride::Index lda_real{};
typename LayoutA::Stride::Index lda_imag{};
typename LayoutB::Stride::Index ldb_real{};
typename LayoutB::Stride::Index ldb_imag{};
typename LayoutC::Stride::Index ldc_real{};
typename LayoutC::Stride::Index ldc_imag{};
typename LayoutC::Stride::Index ldd_real{};
typename LayoutC::Stride::Index ldd_imag{};
//
// Methods
//
Arguments() = default;
/// constructs an arguments structure
    Arguments(
      GemmUniversalMode mode,
      GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
int const *ptr_M,
int const *ptr_N,
int const *ptr_K,
void const * const * ptr_A_real,
void const * const * ptr_A_imag,
void const * const * ptr_B_real,
void const * const * ptr_B_imag,
void const * const * ptr_C_real,
void const * const * ptr_C_imag,
void * const * ptr_D_real,
void * const * ptr_D_imag,
typename LayoutA::Stride::Index lda_real,
typename LayoutA::Stride::Index lda_imag,
typename LayoutB::Stride::Index ldb_real,
typename LayoutB::Stride::Index ldb_imag,
typename LayoutC::Stride::Index ldc_real,
typename LayoutC::Stride::Index ldc_imag,
typename LayoutC::Stride::Index ldd_real,
typename LayoutC::Stride::Index ldd_imag)
:
      UniversalArgumentsBase(mode, problem_size, batch_count, /*batch_stride_D=*/0),
epilogue(epilogue),
ptr_M(ptr_M),
ptr_N(ptr_N),
ptr_K(ptr_K),
ptr_A_real(ptr_A_real),
ptr_A_imag(ptr_A_imag),
ptr_B_real(ptr_B_real),
ptr_B_imag(ptr_B_imag),
ptr_C_real(ptr_C_real),
ptr_C_imag(ptr_C_imag),
ptr_D_real(ptr_D_real),
ptr_D_imag(ptr_D_imag),
lda_real(lda_real),
lda_imag(lda_imag),
ldb_real(ldb_real),
ldb_imag(ldb_imag),
ldc_real(ldc_real),
ldc_imag(ldc_imag),
ldd_real(ldd_real),
ldd_imag(ldd_imag)
{}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_M, args.ptr_N);
std::swap(args.ptr_A_real, args.ptr_B_real);
std::swap(args.ptr_A_imag, args.ptr_B_imag);
std::swap(args.lda_real, args.ldb_real);
std::swap(args.lda_imag, args.ldb_imag);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>;
//
// Data members
//
typename Mma::IteratorA::Params params_A_real{};
typename Mma::IteratorA::Params params_A_imag{};
typename Mma::IteratorB::Params params_B_real{};
typename Mma::IteratorB::Params params_B_imag{};
typename Epilogue::OutputTileIterator::Params params_C_real{};
typename Epilogue::OutputTileIterator::Params params_C_imag{};
typename Epilogue::OutputTileIterator::Params params_D_real{};
typename Epilogue::OutputTileIterator::Params params_D_imag{};
typename EpilogueOutputOp::Params output_op{};
int const *ptr_M{nullptr};
int const *ptr_N{nullptr};
int const *ptr_K{nullptr};
void const * const * ptr_A_real{nullptr};
void const * const * ptr_A_imag{nullptr};
void const * const * ptr_B_real{nullptr};
void const * const * ptr_B_imag{nullptr};
void const * const * ptr_C_real{nullptr};
void const * const * ptr_C_imag{nullptr};
void * const * ptr_D_real{nullptr};
void * const * ptr_D_imag{nullptr};
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
ptr_M(args.ptr_M),
ptr_N(args.ptr_N),
ptr_K(args.ptr_K),
params_A_real(args.lda_real),
params_A_imag(args.lda_imag),
params_B_real(args.ldb_real),
params_B_imag(args.ldb_imag),
params_C_real(args.ldc_real),
params_C_imag(args.ldc_imag),
params_D_real(args.ldd_real),
params_D_imag(args.ldd_imag),
output_op(args.epilogue),
ptr_A_real(args.ptr_A_real),
ptr_A_imag(args.ptr_A_imag),
ptr_B_real(args.ptr_B_real),
ptr_B_imag(args.ptr_B_imag),
ptr_C_real(args.ptr_C_real),
ptr_C_imag(args.ptr_C_imag),
ptr_D_real(args.ptr_D_real),
ptr_D_imag(args.ptr_D_imag)
{}
/// Lightweight update given a subset of arguments.
void update(Arguments const &args)
{
ptr_M = args.ptr_M;
ptr_N = args.ptr_N;
ptr_K = args.ptr_K;
ptr_A_real = args.ptr_A_real;
ptr_A_imag = args.ptr_A_imag;
ptr_B_real = args.ptr_B_real;
ptr_B_imag = args.ptr_B_imag;
ptr_C_real = args.ptr_C_real;
ptr_C_imag = args.ptr_C_imag;
ptr_D_real = args.ptr_D_real;
ptr_D_imag = args.ptr_D_imag;
output_op = args.epilogue;
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(Arguments const &args) {
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = args.problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = args.problem_size.m() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = args.problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = args.problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = args.problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = args.problem_size.m() % kAlignmentC;
}
if (isAMisaligned || isBMisaligned || isCMisaligned) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmPlanarComplexArray op;
op(params, shared_storage);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int batch_idx = threadblock_tile_offset.k();
int problem_size_m = params.problem_size.m();
int problem_size_n = params.problem_size.n();
int problem_size_k = params.problem_size.k();
ElementA *ptr_A_real = static_cast<ElementA *>(const_cast<void *>(params.ptr_A_real[batch_idx]));
ElementA *ptr_A_imag = static_cast<ElementA *>(const_cast<void *>(params.ptr_A_imag[batch_idx]));
ElementB *ptr_B_real = static_cast<ElementB *>(const_cast<void *>(params.ptr_B_real[batch_idx]));
ElementB *ptr_B_imag = static_cast<ElementB *>(const_cast<void *>(params.ptr_B_imag[batch_idx]));
//
// If pointers for problem sizes are specified, these are loaded from global memory
//
if (params.ptr_M) {
problem_size_m = params.ptr_M[batch_idx];
}
if (params.ptr_N) {
problem_size_n = params.ptr_N[batch_idx];
}
if (params.ptr_K) {
problem_size_k = params.ptr_K[batch_idx];
}
int const kBlockCountM = (problem_size_m + Mma::Shape::kM - 1) / Mma::Shape::kM;
int const kBlockCountN = (problem_size_n + Mma::Shape::kN - 1) / Mma::Shape::kN;
int const kGemmKIterations = (problem_size_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
//
// Each threadblock loops over the logical problem size which the kernel may have discovered
// after the grid is launched.
//
CUTLASS_PRAGMA_NO_UNROLL
for (int block_m = threadblock_tile_offset.m();
block_m < kBlockCountM;
block_m += params.grid_tiled_shape.m()) {
CUTLASS_PRAGMA_NO_UNROLL
for (int block_n = threadblock_tile_offset.n();
block_n < kBlockCountN;
block_n += params.grid_tiled_shape.n()) {
//
// Compute indices within threadblock and warp.
//
int thread_idx = threadIdx.x;
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Proceed with regular GEMM logic.
//
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{ block_m * Mma::Shape::kM, 0};
cutlass::MatrixCoord tb_offset_B{ 0, block_n * Mma::Shape::kN };
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A_real(
params.params_A_real,
ptr_A_real,
{problem_size_m, problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorA iterator_A_imag(
params.params_A_imag,
ptr_A_imag,
{problem_size_m, problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B_real(
params.params_B_real,
ptr_B_real,
{problem_size_k, problem_size_n},
thread_idx,
tb_offset_B);
typename Mma::IteratorB iterator_B_imag(
params.params_B_imag,
ptr_B_imag,
{problem_size_k, problem_size_n},
thread_idx,
tb_offset_B);
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
mma(
kGemmKIterations,
accumulators,
iterator_A_real,
iterator_A_imag,
iterator_B_real,
iterator_B_imag,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
        // assume identity swizzle
MatrixCoord threadblock_offset(
block_m * Mma::Shape::kM,
block_n * Mma::Shape::kN
);
ElementC *ptr_C_real = static_cast<ElementC *>(const_cast<void *>(params.ptr_C_real[batch_idx]));
ElementC *ptr_C_imag = static_cast<ElementC *>(const_cast<void *>(params.ptr_C_imag[batch_idx]));
ElementC *ptr_D_real = static_cast<ElementC *>(params.ptr_D_real[batch_idx]);
ElementC *ptr_D_imag = static_cast<ElementC *>(params.ptr_D_imag[batch_idx]);
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C_real(
params.params_C_real,
ptr_C_real,
{problem_size_m, problem_size_n},
thread_idx,
threadblock_offset
);
typename Epilogue::OutputTileIterator iterator_C_imag(
params.params_C_imag,
ptr_C_imag,
{problem_size_m, problem_size_n},
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D_real(
params.params_D_real,
ptr_D_real,
{problem_size_m, problem_size_n},
thread_idx,
threadblock_offset
);
typename Epilogue::OutputTileIterator iterator_D_imag(
params.params_D_imag,
ptr_D_imag,
{problem_size_m, problem_size_n},
thread_idx,
threadblock_offset
);
//
// Construct epilogue
//
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D_real,
iterator_D_imag,
accumulators,
iterator_C_real,
iterator_C_imag);
} // for block_n
} // for block_m
}
};
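//
// A minimal sketch (illustrative only; the helper below is a standalone example, not part of
// the kernel API). The loops above cover a logical tile grid whose extent may only be known
// after launch, because per-batch problem sizes can be read from params.ptr_M / ptr_N / ptr_K,
// so each CTA strides through the tile grid instead of assuming a one-to-one block-to-tile
// mapping.
//
CUTLASS_HOST_DEVICE
constexpr int example_tile_count(int extent, int tile_size) {
  // Same ceil-division used for kBlockCountM / kBlockCountN / kGemmKIterations above.
  return (extent + tile_size - 1) / tile_size;
}
// e.g. 1000 rows with a 128-row threadblock tile yield 8 row tiles; a CTA at block_m == 3 in a
// grid with grid_tiled_shape.m() == 4 then visits row tiles 3 and 7.
static_assert(example_tile_count(1000, 128) == 8, "ceil-div example");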
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/gemm_planar_complex_array.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm_planar_complex_array.h",
"repo_id": "include",
"token_count": 7703
} | 28 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
namespace detail
{
template<typename ElementAlphaBeta, bool BetaIsZero>
struct GemvBatchedStridedEpilogueScaling
{
ElementAlphaBeta const & alpha;
ElementAlphaBeta const & beta;
CUTLASS_DEVICE
GemvBatchedStridedEpilogueScaling(ElementAlphaBeta& alpha_, ElementAlphaBeta& beta_) :
alpha(alpha_), beta(beta_)
{ }
template<typename FragmentCD, typename FragmentAccumulator>
CUTLASS_DEVICE
void operator()(FragmentAccumulator& accumulators,
FragmentCD const& fragment_C,
FragmentCD& fragment_D) const
{
using AccType = typename FragmentAccumulator::value_type;
using CDType = typename FragmentCD::value_type;
static_assert(FragmentCD::kElements == FragmentAccumulator::kElements,
"Mistmatch in fragment sizes.");
for (int i = 0; i < FragmentCD::kElements; ++i)
{
if (BetaIsZero)
{
fragment_D[i] = CDType(accumulators[i] * AccType(alpha));
}
else
{
fragment_D[i] = CDType(accumulators[i] * AccType(alpha)
+ AccType(fragment_C[i]) * AccType(beta));
}
}
}
};
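// A minimal scalar sketch of the scaling implemented above (illustrative only; the helper
// below is a standalone example, not used by the kernels): each output element is computed as
//
//   D[i] = alpha * accumulator[i] + beta * C[i]
//
// and when BetaIsZero is true the beta term is dropped entirely, so fragment_C is never read.
template <typename T>
CUTLASS_HOST_DEVICE
T example_epilogue_scale(T alpha, T acc, T beta, T c, bool beta_is_zero) {
  return beta_is_zero ? T(alpha * acc) : T(alpha * acc + beta * c);
}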
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename GemvKernel, typename ElementAlphaBeta, bool BetaIsZero=false>
CUTLASS_DEVICE void GemvBatchedStridedDevice(
cutlass::gemm::BatchedGemmCoord problem_size,
ElementAlphaBeta alpha,
ElementAlphaBeta beta,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_C,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldc,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
using ThreadBlockGemv = typename GemvKernel::ThreadBlockGemv;
using ThreadBlockSwizzle = typename GemvKernel::ThreadBlockSwizzle;
using EpilogueScale = detail::GemvBatchedStridedEpilogueScaling<ElementAlphaBeta, BetaIsZero>;
ThreadBlockSwizzle swizzler;
// Compute initial location in logical coordinates
BatchedGemmCoord tb_offset = swizzler.get_tile_offset();
int const batch_idx = swizzler.get_batch_idx();
// Offset to the batch
ref_A.add_pointer_offset(batch_idx*lda);
ref_B.add_pointer_offset(batch_idx*ldb);
// Construct iterators to A and B operands
typename GemvKernel::IteratorA::Params params_A(ref_A.layout());
typename GemvKernel::IteratorA iterator_A(
params_A,
ref_A.data(),
{ 1, problem_size.k() },
0,
{ 0, 0 });
typename GemvKernel::IteratorB::Params params_B(ref_B.layout());
typename GemvKernel::IteratorB iterator_B(
params_B,
ref_B.data(),
{ problem_size.k(), problem_size.n() },
threadIdx.x,
{ 0, tb_offset.n()*ThreadBlockGemv::Shape::kN });
//
// Main loop
//
// Construct thread-scoped matrix multiply
ThreadBlockGemv mma;
typename ThreadBlockGemv::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped gemv
mma(problem_size.mnk(), accumulators, iterator_A, iterator_B, accumulators);
//
// Epilogue
//
typename GemvKernel::FragmentCD fragment_CD;
// Load C (skip if beta is zero)
if (!BetaIsZero)
{
tb_offset = swizzler.get_tile_offset();
ref_C.add_pointer_offset(batch_idx*ldc);
typename GemvKernel::IteratorCD::Params params_C(ref_C.layout());
typename GemvKernel::IteratorCD iterator_C(
params_C,
ref_C.data(),
{ 1, problem_size.n() },
threadIdx.x,
{ 0, tb_offset.n()*ThreadBlockGemv::Shape::kN });
iterator_C.load(fragment_CD);
}
// Apply alpha/beta scaling
EpilogueScale epilogue_scale(alpha, beta);
epilogue_scale(accumulators, fragment_CD, fragment_CD);
// Store D
tb_offset = swizzler.get_tile_offset();
ref_D.add_pointer_offset(batch_idx*ldd);
typename GemvKernel::IteratorCD::Params params_D(ref_D.layout());
typename GemvKernel::IteratorCD iterator_D(
params_D,
ref_D.data(),
{ 1, problem_size.n() },
threadIdx.x,
{ 0, tb_offset.n()*ThreadBlockGemv::Shape::kN });
iterator_D.store(fragment_CD);
}
template <typename GemvKernel, typename ElementAlphaBeta, bool BetaIsZero>
CUTLASS_GLOBAL void GemvBatchedStrided(
cutlass::gemm::BatchedGemmCoord problem_size,
ElementAlphaBeta alpha,
ElementAlphaBeta beta,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_C,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldc,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
GemvBatchedStridedDevice<GemvKernel, ElementAlphaBeta, BetaIsZero>(
problem_size, alpha, beta, ref_A, lda, ref_B, ldb, ref_C, ldc, ref_D, ldd
);
}
template <typename GemvKernel, typename ElementAlphaBeta>
CUTLASS_GLOBAL void GemvBatchedStrided(
cutlass::gemm::BatchedGemmCoord problem_size,
ElementAlphaBeta alpha,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
GemvBatchedStridedDevice<GemvKernel, ElementAlphaBeta, true>(
problem_size, alpha, ElementAlphaBeta(0), ref_A, lda, ref_B, ldb, ref_D, ldd, ref_D, ldd
);
}
template <typename GemvKernel>
CUTLASS_GLOBAL void GemvBatchedStrided(
cutlass::gemm::BatchedGemmCoord problem_size,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
using ElementAlphaBeta = typename GemvKernel::IteratorCD::Element;
GemvBatchedStridedDevice<GemvKernel, ElementAlphaBeta, true>(
problem_size, ElementAlphaBeta(1), ElementAlphaBeta(0), ref_A, lda, ref_B, ldb, ref_D, ldd, ref_D, ldd
);
}
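//
// A minimal host-side launch sketch for the alpha=1, beta=0 overload directly above
// (illustrative only: MyGemvKernel, grid, block, and stream are assumed placeholders, and the
// grid/block shape must agree with GemvKernel::ThreadBlockSwizzle, which is not shown here).
// The lda/ldb/ldd arguments are per-batch strides in elements, consumed above as
// ref_X.add_pointer_offset(batch_idx * ldX). Since A is read as a 1 x K vector, B as a K x N
// matrix, and D as a 1 x N vector per batch, densely packed batches can use:
//
//   int64_t batch_stride_A = problem.k();
//   int64_t batch_stride_B = int64_t(problem.k()) * problem.n();
//   int64_t batch_stride_D = problem.n();
//
//   GemvBatchedStrided<MyGemvKernel><<<grid, block, 0, stream>>>(
//       problem, ref_A, batch_stride_A, ref_B, batch_stride_B, ref_D, batch_stride_D);
//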
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/kernel/gemv_batched_strided.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemv_batched_strided.h",
"repo_id": "include",
"token_count": 3192
} | 29 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for multiply-add operations
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/thread/mma.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that handles conventional layouts for IDP4A
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_
>
struct Mma<
Shape_,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int32_t,
LayoutC_,
arch::OpMultiplyAdd,
bool> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = int8_t;
/// Layout of A matrix (concept: layout::MapFunc)
using LayoutA = layout::RowMajor;
/// Data type of operand B
using ElementB = int8_t;
/// Layout of B matrix (concept: layout::MapFunc)
using LayoutB = layout::ColumnMajor;
/// Element type of operand C
using ElementC = int32_t;
/// Layout of C matrix (concept: layout::MapFunc)
using LayoutC = LayoutC_;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMK>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kKN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
/// Underlying matrix multiply operator (concept: arch::Mma)
// Use 1x1x4 IDP4A sequence for bulk of computation
using ArchMmaOperator = arch::Mma<
gemm::GemmShape<1,1,4>,
1,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
arch::OpMultiplyAdd>;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
TensorRef<ElementC, LayoutC> d(
reinterpret_cast<ElementC *>(&D), LayoutC::packed({ Shape::kM, Shape::kN }));
// Copy accumulators
D = C;
/// Use 1x1x4 IDP4A sequence for bulk of computation
ArchMmaOperator mma;
// Compute matrix product
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Shape::kK / ArchMmaOperator::Shape::kK; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM; ++m) {
MatrixCoord mn(m, n);
Array<int8_t, 4> const *ptr_A = reinterpret_cast<Array<int8_t, 4> const *>(&A);
Array<int8_t, 4> const *ptr_B = reinterpret_cast<Array<int8_t, 4> const *>(&B);
Array<int32_t, 1> tmp = reinterpret_cast<Array<int32_t, 1> &>(d.at(mn));
mma(
tmp,
ptr_A[m * Shape::kK / ArchMmaOperator::Shape::kK + k],
ptr_B[n * Shape::kK / ArchMmaOperator::Shape::kK + k],
tmp);
d.at(mn) = reinterpret_cast<int32_t &>(tmp);
}
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that handles conventional layouts for IDP4A
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_
>
struct Mma<
Shape_,
int8_t,
layout::ColumnMajor,
int8_t,
layout::RowMajor,
int32_t,
LayoutC_,
arch::OpMultiplyAdd,
int8_t> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = int8_t;
/// Layout of A matrix (concept: layout::MapFunc)
using LayoutA = layout::ColumnMajor;
/// Data type of operand B
using ElementB = int8_t;
/// Layout of B matrix (concept: layout::MapFunc)
using LayoutB = layout::RowMajor;
/// Element type of operand C
using ElementC = int32_t;
/// Layout of C matrix (concept: layout::MapFunc)
using LayoutC = LayoutC_;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMK>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kKN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
/// Underlying matrix multiply operator (concept: arch::Mma)
/// Use 1x1x4 IDP4A sequence for bulk of computation
using ArchMmaOperator = arch::Mma<
gemm::GemmShape<1,1,4>,
1,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
arch::OpMultiplyAdd>;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
TensorRef<ElementC, LayoutC> d(
reinterpret_cast<ElementC *>(&D), LayoutC::packed({ Shape::kM, Shape::kN }));
// Copy accumulators
D = C;
/// Underlying matrix multiply operator
ArchMmaOperator mma;
Array<int8_t, 4> const *ptr_A = reinterpret_cast<Array<int8_t, 4> const *>(&A);
Array<int8_t, 4> const *ptr_B = reinterpret_cast<Array<int8_t, 4> const *>(&B);
// Compute matrix product
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Shape::kK / ArchMmaOperator::Shape::kK; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM; ++m) {
MatrixCoord mn(m, n);
Array<int32_t, 1> tmp = reinterpret_cast<Array<int32_t, 1> &>(d.at(mn));
mma(
tmp,
ptr_A[m + k * Shape::kM],
ptr_B[n + k * Shape::kN],
tmp);
d.at(mn) = reinterpret_cast<int32_t &>(tmp);
}
}
}
}
};
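// A minimal scalar sketch of the 1x1x4 IDP4A step used by both specializations above
// (illustrative only; the helper below is a standalone example, not used by the operators):
// each instruction folds a dot product of four int8 pairs into a 32-bit accumulator,
// i.e. d = c + a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + a[3]*b[3].
CUTLASS_HOST_DEVICE
int32_t example_idp4a(int8_t const (&a)[4], int8_t const (&b)[4], int32_t c) {
  CUTLASS_PRAGMA_UNROLL
  for (int i = 0; i < 4; ++i) {
    c += int32_t(a[i]) * int32_t(b[i]);
  }
  return c;
}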
} // namespace thread
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/thread/mma_sm61.h/0 | {
"file_path": "include/cutlass/gemm/thread/mma_sm61.h",
"repo_id": "include",
"token_count": 2984
} | 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Data type of accumulator matrix
typename ElementC_,
    /// Layout of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Transformation applied to A operand
typename TransformA_ = NumericArrayConverter<
typename SmemIteratorA_::Element,
typename IteratorA_::Element,
IteratorA_::Fragment::kElements>,
    /// Transformation applied to B operand
typename TransformB_ = NumericArrayConverter<
typename SmemIteratorB_::Element,
typename IteratorB_::Element,
IteratorB_::Fragment::kElements>,
/// Used for partial specialization
typename Enable = bool
>
class MmaPipelined : public MmaBase<Shape_, Policy_, 2> {
public:
///< Base class
using Base = MmaBase<Shape_, Policy_, 2>;
using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
using IteratorA = IteratorA_; ///< Iterates over tiles of A operand in global memory
using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory
using ElementC = ElementC_; ///< Data type of accumulator matrix
using LayoutC = LayoutC_; ///< Layout of accumulator matrix
using Policy = Policy_; ///< Policy describing tuning details
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
//
// Dependent types
//
/// Fragment of operand A loaded from global memory
using FragmentA = typename IteratorA::Fragment;
/// Fragment of operand B loaded from global memory
using FragmentB = typename IteratorB::Fragment;
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Obtain the arch tag from the warp-level operator
using ArchTag = typename Policy::Operator::ArchTag;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
  // statically assert kStages for MmaPipelined is two (double-buffered pipeline)
static_assert((Base::kStages==2), "MmaPipelined requires kStages set to value 2");
protected:
//
// Data members
//
/// Warp-level MMA operator
Operator warp_mma;
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
///< transformation applied to A fragment
TransformA transform_A_;
///< transformation applied to B fragment
TransformB transform_B_;
/// Shared memory write stage index
int smem_write_stage_idx;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaPipelined(
typename Base::SharedStorage &shared_storage, ///< Shared storage needed for internal use by threadblock-scoped GEMM
int thread_idx, ///< ID within the threadblock
int warp_idx, ///< ID of warp
int lane_idx, ///< ID of each thread within a warp
TransformA transform_A = TransformA(), ///< transformation applied to A fragment
TransformB transform_B = TransformB() ///< transformation applied to B fragment
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx),
transform_A_(transform_A),
transform_B_(transform_B),
smem_write_stage_idx(0)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
/// Advance shared memory write-iterators to the next stage
CUTLASS_DEVICE
void advance_smem_write_stage()
{
++this->smem_iterator_A_;
++this->smem_iterator_B_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
}
smem_write_stage_idx ^= 1;
}
/// Advance shared memory read- and write-iterators to the next stage
CUTLASS_DEVICE
void advance_smem_stages()
{
++this->smem_iterator_A_;
++this->smem_iterator_B_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
// wrap write stage
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
}
else
{
// wrap read stage
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
}
smem_write_stage_idx ^= 1;
}
/// GEMM prologue. Bootstrap the global->shared memory pipeline by fetching
/// the global fragments needed by the first kStages-1 threadblock mainloop iterations
CUTLASS_DEVICE
void prologue(
IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory
IteratorB &iterator_B, ///< [in|out] iterator over B operand in global memory
int &gemm_k_iterations) ///< [in|out] number of threadblock mainloop iterations remaining
{
    // The last k-block is loaded in the prologue
// Load A fragment from global A
FragmentA tb_frag_A;
tb_frag_A.clear();
iterator_A.load(tb_frag_A);
++iterator_A;
// Load B fragment from global B
FragmentB tb_frag_B;
tb_frag_B.clear();
iterator_B.load(tb_frag_B);
++iterator_B;
// Store A and B fragments to shared
this->smem_iterator_A_.store(transform_A_(tb_frag_A));
this->smem_iterator_B_.store(transform_B_(tb_frag_B));
// Advance write stage
advance_smem_write_stage();
}
/// Wait until we have at least one completed global fetch stage
CUTLASS_DEVICE
void gmem_wait()
{
__syncthreads();
}
/// Perform the specified number of threadblock mainloop iterations of matrix
/// multiply-accumulate. Assumes prologue has been initiated.
CUTLASS_DEVICE
void gemm_iters(
int gemm_k_iterations, ///< number of threadblock mainloop iterations
FragmentC &accum, ///< [in|out] accumulator tile
IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory
IteratorB &iterator_B) ///< [in|out] iterator over B operand in global memory
{
using WarpFragmentA = typename Operator::FragmentA;
using WarpFragmentB = typename Operator::FragmentB;
// Pair of fragments used to overlap shared memory loads and math instructions
WarpFragmentA warp_frag_A[2];
WarpFragmentB warp_frag_B[2];
// Load A fragment from shared A
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_frag_A[0]);
++this->warp_tile_iterator_A_;
// Load B fragment from shared B
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_B_.load(warp_frag_B[0]);
++this->warp_tile_iterator_B_;
// Pair of fragments used to overlap global memory loads and math instructions;
FragmentA tb_frag_A;
FragmentB tb_frag_B;
// Avoid reading out of bounds
iterator_A.clear_mask(gemm_k_iterations <= 1);
iterator_B.clear_mask(gemm_k_iterations <= 1);
//
// Mainloop
//
// Note: The main loop does not support Base::kWarpGemmIterations == 2.
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > 0; --gemm_k_iterations) {
//
// Loop over GEMM K dimension
//
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if this is the last group
// as the case may be.
if (warp_mma_k == Base::kWarpGemmIterations - 1) {
// Write fragments to shared memory
this->smem_iterator_A_.store(transform_A_(tb_frag_A));
this->smem_iterator_B_.store(transform_B_(tb_frag_B));
// Wait until we have at least one completed global fetch stage
gmem_wait();
// Advance smem read and write stages
advance_smem_stages();
}
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
if (warp_mma_k == 0) {
// Load fragment from global A
tb_frag_A.clear();
iterator_A.load(tb_frag_A);
++iterator_A;
// Load fragment from global B
tb_frag_B.clear();
iterator_B.load(tb_frag_B);
++iterator_B;
// Avoid reading out of bounds if this was the last loop iteration
iterator_A.clear_mask(gemm_k_iterations <= 2);
iterator_B.clear_mask(gemm_k_iterations <= 2);
}
warp_mma(
accum,
warp_frag_A[warp_mma_k % 2],
warp_frag_B[warp_mma_k % 2],
accum);
}
}
}
/// Prepares the class for another prologue.
CUTLASS_DEVICE
void wind_down()
{
// First, increment remaining warp tiles to catch it up with the write stage.
#pragma unroll
for (int warp_mma_k = 1; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k)
{
this->warp_tile_iterator_A_.set_kgroup_index(warp_mma_k);
this->warp_tile_iterator_B_.set_kgroup_index(warp_mma_k);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
}
// If we bumped the read iterators to the end of the circular buffer, wrap them around to
// align them with the write iterators
if (smem_write_stage_idx == 0)
{
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
}
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
int gemm_k_iterations, ///< number of iterations of the mainloop
FragmentC &accum, ///< destination accumulator tile
IteratorA iterator_A, ///< iterator over A operand in global memory
IteratorB iterator_B, ///< iterator over B operand in global memory
FragmentC const &src_accum) ///< source accumulator tile
{
// Prologue
prologue(iterator_A, iterator_B, gemm_k_iterations);
// Wait until we have at least one completed global fetch stage
gmem_wait();
// Perform accumulation in the 'd' output operand
accum = src_accum;
// Perform the MAC-iterations
gemm_iters(gemm_k_iterations, accum, iterator_A, iterator_B);
}
};
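//
// A sketch of the double-buffered schedule realized above (illustrative pseudocode only).
// With kStages == 2, shared memory holds two threadblock tiles per operand; the write stage
// toggles via smem_write_stage_idx ^= 1, and iterators that run off the end of the circular
// buffer are wrapped back with negative tile offsets:
//
//   prologue : load gmem tile 0, store it to smem stage 0, advance the write stage
//   sync     : __syncthreads() so the first smem tile is visible to all warps
//   iter k   : compute on smem stage (k % 2) while fetching gmem tile k+1 into registers;
//              at the last warp-level k-group, store tile k+1 to smem stage ((k+1) % 2),
//              __syncthreads(), and advance the smem read/write stages
//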
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/mma_pipelined.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/mma_pipelined.h",
"repo_id": "include",
"token_count": 5846
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This defines a "fragment" iterator for visiting the fragments of a warp tile
that participate in one warp-level mma operation.
          Typically, this is used to access the accumulator tile/fragment of a warp-level mma operation.
          The accumulator tile is then partitioned into smaller tiles/fragments that can be fed into
          the next warp-level mma operation.
          This iterator is necessary to accomplish warp-level mma fusion, where the accumulator tile is
          reused as a multiplicand tile for the next mma.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_conversion.h"
namespace cutlass {
namespace gemm {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Size of the accumulation tile shape (concept: MatrixShape)
typename AccumulatorShape_,
/// KBlocks columns to compute residual
int KBlocksColumn_,
/// Accumulator Element type
typename ElementAccumulator_,
/// Element type
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Output operation on the fragment
typename OutputOp_>
class MmaTensorOpFragmentIterator;
// Partial specialization for col-major accumulator tile
template <
/// Shape of warp tile to load (concept: MatrixShape)
typename Shape_,
/// Shape of the warp accumulation tile (concept: MatrixShape)
typename AccumulatorShape_,
/// KBlocks columns to compute residual
int KBlocksColumn_,
/// Accumulator Element type
typename ElementAccumulator_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Output operation on fragment
typename OutputOp_>
class MmaTensorOpFragmentIterator<Shape_, AccumulatorShape_, KBlocksColumn_, ElementAccumulator_, Element_,
cutlass::layout::ColumnMajor,
InstructionShape_, OutputOp_> {
public:
/// Shape of warp tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Shape of the warp accumulation tile (concept: MatrixShape)
using AccumulatorShape = AccumulatorShape_;
/// KBlocks columns to compute residual
static int const kKBlockColumn = KBlocksColumn_;
/// Accumulator Element type
using ElementAccumulator = ElementAccumulator_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Output operation on fragment
using OutputOp = OutputOp_;
/// Number of participating threads
static int const kThreads = 32;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(
AccumulatorShape::kRow == Shape::kRow,
"Rows of Warp Accumulator must be the same as rows of warp");
static_assert(
!(AccumulatorShape::kColumn % Shape::kColumn),
"Shape of Warp Accumulator must be divisible by warp shape.");
static_assert(
!(kKBlockColumn % Shape::kColumn),
"KBlock size must be divisible by warp shape.");
/// Number of times this iterator can be incremented
static int const kIterations = AccumulatorShape::kCount / Shape::kCount;
};
private:
static int const kElementsPerAccess = InstructionShape::kM * InstructionShape::kN / kThreads;
/// Number of mma operations performed by a warp
using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM,
Shape::kColumn / InstructionShape::kN>;
/// Number of mma operations performed by the entire accumulator
using AccumulatorIterations = MatrixShape<AccumulatorShape::kRow / InstructionShape::kM,
AccumulatorShape::kColumn / InstructionShape::kN>;
/// Number of K iterations
static int const kKBlockIterations = (AccumulatorShape::kColumn + kKBlockColumn - 1) / kKBlockColumn;
static int const kResidualColumn = AccumulatorShape::kColumn - (kKBlockIterations - 1) * kKBlockColumn;
static int const kKBlockColumnIterations = kKBlockColumn / Shape::kColumn
* (AccumulatorShape::kRow / Shape::kRow);
static int const kResidualIndex = kResidualColumn / Shape::kColumn
* (AccumulatorShape::kRow / Shape::kRow);
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<Element, Shape::kCount / kThreads>;
/// Accumulator Fragment object
using AccumulatorFragment = Array<ElementAccumulator, AccumulatorShape::kCount / kThreads>;
/// Scale Bias Element Type
using ElementScaleBias = typename OutputOp::ElementCompute;
/// Scale Bias Fragment object
using ScaleBiasFragment = Array<ElementScaleBias, InstructionShape::kM * InstructionShape::kK / kThreads>;
private:
/// Internal access type
using AccessType = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentAccessType = Array<Element, kElementsPerAccess>;
using ScaleBiasAccessType = Array<ElementScaleBias, kElementsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
/// Used to access residual tile first
bool is_residual_tile_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator(AccumulatorFragment const &accum)
: accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0), is_residual_tile_(true) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
index_ += index_offset;
if(is_residual_tile_ && index_ >= kKBlockColumnIterations) {
index_ = index_ - kKBlockColumnIterations + kResidualIndex;
is_residual_tile_ = false;
}
}
/// Increments
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator &operator++() {
add_offset(1);
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator &operator--() {
add_offset(-1);
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, OutputOp output_op) const {
if (output_op.is_source_needed()) //beta must be zero
assert(0);
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
int index = index_ * MmaIterations::kCount;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; n++) {
for (int m = 0; m < MmaIterations::kRow; m++) {
int accumulator_access_offset =
n * AccumulatorIterations::kRow + m + index;
frag_ptr[m * MmaIterations::kColumn + n].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[m * MmaIterations::kColumn + n] = output_op(accumulators_[accumulator_access_offset]);
}
}
}
/// Loads a fragment from the referenced part of the accumulator tile
/// Then apply per-channel scale and bias
CUTLASS_HOST_DEVICE
void load(Fragment &frag, ScaleBiasFragment &scale,
ScaleBiasFragment &bias, OutputOp output_op) const {
if (output_op.is_source_needed()) //beta must be zero
assert(0);
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
ScaleBiasAccessType * scale_ptr = reinterpret_cast<ScaleBiasAccessType *>(&scale);
ScaleBiasAccessType * bias_ptr = reinterpret_cast<ScaleBiasAccessType *>(&bias);
int index = index_ * MmaIterations::kCount;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; n++) {
for (int m = 0; m < MmaIterations::kRow; m++) {
int accumulator_access_offset =
n * AccumulatorIterations::kRow + m + index;
frag_ptr[m * MmaIterations::kColumn + n].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[m * MmaIterations::kColumn + n] =
output_op(accumulators_[accumulator_access_offset],
scale_ptr[n] /*scale*/, bias_ptr[n] /*bias*/);
}
}
}
};
// Partial specialization for row-major accumulator tile
template <
/// Shape of warp tile to load (concept: MatrixShape)
typename Shape_,
/// Shape of the warp accumulation tile (concept: MatrixShape)
typename AccumulatorShape_,
/// KBlocks columns to compute residual
int KBlocksColumn_,
/// Accumulator Element type
typename ElementAccumulator_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Output operation on fragment
typename OutputOp_>
class MmaTensorOpFragmentIterator<Shape_, AccumulatorShape_, KBlocksColumn_, ElementAccumulator_, Element_,
cutlass::layout::RowMajor,
InstructionShape_, OutputOp_> {
public:
/// Shape of warp tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Shape of the warp accumulation tile (concept: MatrixShape)
using AccumulatorShape = AccumulatorShape_;
/// KBlocks columns to compute residual
static int const kKBlockColumn = KBlocksColumn_;
/// Accumulator Element type
using ElementAccumulator = ElementAccumulator_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Output operation on fragment
using OutputOp = OutputOp_;
/// Number of participating threads
static int const kThreads = 32;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(
AccumulatorShape::kRow == Shape::kRow,
"Rows of Warp Accumulator must be the same as rows of warp");
static_assert(
!(AccumulatorShape::kColumn % Shape::kColumn),
"Shape of Warp Accumulator must be divisible by warp shape.");
static_assert(
!(kKBlockColumn % Shape::kColumn),
"KBlock size must be divisible by warp shape.");
/// Number of times this iterator can be incremented
static int const kIterations = AccumulatorShape::kCount / Shape::kCount;
};
private:
static int const kRowsPerIteration = 8;
static int const kColumnsPerIteration = 16;
static int const kElementsPerIteration = kRowsPerIteration * InstructionShape::kN / kThreads;
static int const kElementsPerAccess = kRowsPerIteration * kColumnsPerIteration / kThreads;
static int const kIterationsPerAccess = kElementsPerAccess / kElementsPerIteration;
// Number of iterations per actual instruction
static int const kIterationsPerInstruction = InstructionShape::kM / kRowsPerIteration;
static int const kAccessStride = kIterationsPerInstruction;
/// Number of mma operations performed by a warp
using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM,
Shape::kColumn / InstructionShape::kN>;
/// Number of mma operations performed by the entire accumulator
using AccumulatorIterations = MatrixShape<AccumulatorShape::kRow / InstructionShape::kM,
AccumulatorShape::kColumn / InstructionShape::kN>;
/// Number of Accesses in a warp
using AccessIterations = MatrixShape<MmaIterations::kRow * kIterationsPerInstruction,
MmaIterations::kColumn / kIterationsPerAccess>;
/// Number of K iterations
static int const kKBlockIterations = (AccumulatorShape::kColumn + kKBlockColumn - 1) / kKBlockColumn;
static int const kResidualColumn = AccumulatorShape::kColumn - (kKBlockIterations - 1) * kKBlockColumn;
static int const kKBlockColumnIterations = kKBlockColumn / Shape::kColumn;
static int const kResidualIndex = kResidualColumn / Shape::kColumn;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<Element, Shape::kCount / kThreads>;
/// Accumulator Fragment object
using AccumulatorFragment = Array<ElementAccumulator, AccumulatorShape::kCount / kThreads>;
/// Scale Bias Element Type
using ElementScaleBias = typename OutputOp::ElementCompute;
/// Scale Bias Fragment object
using ScaleBiasFragment = Array<ElementScaleBias, InstructionShape::kM * InstructionShape::kK / kThreads>;
private:
/// Internal access type
using AccessType = Array<ElementAccumulator, kElementsPerIteration>;
using FragmentAccessType = Array<Element, kElementsPerIteration>;
using ScaleBiasAccessType = Array<ElementScaleBias, kElementsPerIteration>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
/// Used to access residual tile first
bool is_residual_tile_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator(AccumulatorFragment const &accum)
: accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0), is_residual_tile_(true) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
index_ += index_offset;
if(is_residual_tile_ && index_ >= kKBlockColumnIterations) {
index_ = index_ - kKBlockColumnIterations + kResidualIndex;
is_residual_tile_ = false;
}
}
/// Increments
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator &operator++() {
add_offset(1);
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator &operator--() {
add_offset(-1);
return *this;
}
CUTLASS_HOST_DEVICE
void set_index(int idx) {
index_ = idx;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, OutputOp output_op) const {
if (output_op.is_source_needed()) //beta must be zero
assert(0);
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
int index = index_ * AccessIterations::kCount;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < AccessIterations::kCount; i++) {
int accumulator_access_offset = index / AccessIterations::kCount * (MmaIterations::kColumn * kIterationsPerInstruction) +
(index % AccessIterations::kCount) / (AccessIterations::kColumn * kIterationsPerInstruction) *
AccumulatorIterations::kColumn * kIterationsPerInstruction +
(index % (AccessIterations::kColumn * kIterationsPerInstruction)) / kIterationsPerInstruction *
(kIterationsPerInstruction * kIterationsPerAccess) +
(index % kIterationsPerInstruction);
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kIterationsPerAccess; j++) {
frag_ptr[i*kIterationsPerAccess + j].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[i*kIterationsPerAccess + j] = output_op(accumulators_[accumulator_access_offset + j * kAccessStride]);
}
index++;
}
}
/// Loads a fragment from the referenced part of the accumulator tile
/// Then apply per-channel scale and bias
CUTLASS_HOST_DEVICE
void load(Fragment &frag, ScaleBiasFragment &scale,
ScaleBiasFragment & bias, OutputOp output_op) const {
if (output_op.is_source_needed()) //beta must be zero
assert(0);
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
ScaleBiasAccessType * scale_ptr = reinterpret_cast<ScaleBiasAccessType *>(&scale);
ScaleBiasAccessType * bias_ptr = reinterpret_cast<ScaleBiasAccessType *>(&bias);
int index = index_ * AccessIterations::kCount;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < AccessIterations::kCount; i++) {
int accumulator_access_offset = index / AccessIterations::kCount * (MmaIterations::kColumn * kIterationsPerInstruction) +
(index % AccessIterations::kCount) / (AccessIterations::kColumn * kIterationsPerInstruction) *
AccumulatorIterations::kColumn * kIterationsPerInstruction +
(index % (AccessIterations::kColumn * kIterationsPerInstruction)) / kIterationsPerInstruction *
(kIterationsPerInstruction * kIterationsPerAccess) +
(index % kIterationsPerInstruction);
int scale_bias_offset = (index
% (kIterationsPerInstruction * AccessIterations::kColumn))
* kIterationsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kIterationsPerAccess; j++) {
frag_ptr[i*kIterationsPerAccess + j].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[i*kIterationsPerAccess + j] = output_op(
accumulators_[accumulator_access_offset + j * kAccessStride],
scale_ptr[scale_bias_offset + j], bias_ptr[scale_bias_offset + j]);
}
index++;
}
}
};
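//
// A worked sizing example for the row-major iterator above (illustrative only). For a warp
// accumulator of 64x96 elements partitioned into 64x32 fragments with kKBlockColumn == 64:
//
//   Policy::kIterations     = (64*96) / (64*32) = 3   fragments per accumulator tile
//   kKBlockIterations       = ceil(96 / 64)     = 2   k-blocks per accumulator tile
//   kResidualColumn         = 96 - (2-1)*64     = 32  columns in the residual k-block
//   kKBlockColumnIterations = 64 / 32           = 2   fragment indices per k-block
//   kResidualIndex          = 32 / 32           = 1
//
// The first pass addresses the residual k-block (is_residual_tile_ starts true): index 0 is
// loaded, index 1 (>= kResidualIndex) is cleared instead of loaded, and further offsets then
// wrap into the full k-block.
//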
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h",
"repo_id": "include",
"token_count": 7309
} | 32 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Defines a class for using IEEE half-precision floating-point types in host or
device code.
*/
#pragma once
#ifndef CUTLASS_ENABLE_F16C
#define CUTLASS_ENABLE_F16C 0
#endif
#if defined(__CUDACC_RTC__)
#include "cutlass/floating_point_nvrtc.h"
// F16C extensions are not meaningful when compiling for NVRTC which only accommodates device code.
#undef CUTLASS_ENABLE_F16C
#define CUTLASS_ENABLE_F16C 0
#else
//
// Standard Library headers belong here to avoid conflicts with NVRTC.
//
#include <cmath>
#include <limits>
#include <cstdint>
#include <cstring>
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
#include <cuda_fp16.h>
#include "cutlass/cutlass.h"
#include "cutlass/float8.h"
#include "cutlass/platform/platform.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
// Optionally target F16C extensions to accelerate half-precision conversion.
#if !defined(__CUDA_ARCH__) && (CUTLASS_ENABLE_F16C)
#if defined(_MSC_VER)
#include <immintrin.h>
#if defined(__i386__) || defined(__x86_64__)
#include <intrin.h>
#endif
#define F16C_ROUND_NEAREST 0
#if !defined(__CUDA_ARCH__)
extern __inline float _cvtsh_ss (unsigned short __S) {
__m128i packed;
std::memcpy(&packed, &__S, sizeof(__S));
__m128 result = _mm_cvtph_ps(packed);
float flt;
std::memcpy(&flt, &result, sizeof(flt));
return flt;
}
__inline unsigned short _cvtss_sh (float __F, const int) {
__m128 packed;
std::memcpy(&packed, &__F, sizeof(__F));
__m128i result = _mm_cvtps_ph(packed, F16C_ROUND_NEAREST);
unsigned short u;
std::memcpy(&u, &result, sizeof(u));
return u;
}
#endif
#else
// Linux
#include <x86intrin.h>
#if defined(__i386__) || defined(__x86_64__)
#include <cpuid.h>
#endif
#define F16C_ROUND_NEAREST (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC)
#endif // _MSC_VER
class CpuId {
bool f16c_enabled;
CpuId() {
#if defined(__i386__) || defined(__x86_64__)
#if defined(_MSC_VER)
int exx[4];
__cpuid (exx, 1);
f16c_enabled = exx[2] & 0x20000000;
#else
// GCC / Clang
int eax, ebx, ecx, edx;
__cpuid (1 , eax, ebx, ecx, edx);
f16c_enabled = ecx & 0x20000000;
#endif
#else
// Arm / PowerPC etc.
f16c_enabled = false;
#endif
}
public:
bool is_f16c_supported() const {
return f16c_enabled;
}
static const CpuId& instance() {
static CpuId cpu;
return cpu;
}
};
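// A minimal host-side sketch of how this helper gates the hardware path below (illustrative
// only; the float value is arbitrary): the F16C intrinsics are used only when the CPU reports
// support at runtime, otherwise the software conversion is taken.
//
//   if (CpuId::instance().is_f16c_supported()) {
//     unsigned short u = _cvtss_sh(1.5f, F16C_ROUND_NEAREST);   // hardware round-to-nearest
//   }
//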
#endif // !defined(__CUDA_ARCH__) && CUTLASS_ENABLE_F16C
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// IEEE half-precision floating-point type
struct alignas(2) half_t {
//
// Data members
//
/// Storage type
uint16_t storage;
//
// Static conversion operators
//
/// Constructs from an unsigned short
CUTLASS_HOST_DEVICE
static half_t bitcast(uint16_t x) {
half_t h;
h.storage = x;
return h;
}
/// FP32 -> FP16 conversion - rounds to nearest even
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 530)
// Avoid inlining in device code if no hardware support
__device__ __noinline__
#else
CUTLASS_HOST_DEVICE
#endif
static half_t convert(float const& flt) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__float2half_rn(flt));
#else
#if !defined(__CUDA_ARCH__) && CUTLASS_ENABLE_F16C
if( CpuId::instance().is_f16c_supported() ) {
unsigned short u = _cvtss_sh(flt, F16C_ROUND_NEAREST);
return bitcast(u);
}
#endif
// software implementation rounds toward nearest even
unsigned s;
#if defined(__CUDA_ARCH__)
s = reinterpret_cast<unsigned const &>(flt);
#else
std::memcpy(&s, &flt, sizeof(s));
#endif
uint16_t sign = uint16_t((s >> 16) & 0x8000);
int16_t exp = uint16_t(((s >> 23) & 0xff) - 127);
int mantissa = s & 0x7fffff;
uint16_t u = 0;
if ((s & 0x7fffffff) == 0) {
// sign-preserving zero
return bitcast(sign);
}
if (exp > 15) {
if (exp == 128 && mantissa) {
// not a number
u = 0x7fff;
} else {
// overflow to infinity
u = sign | 0x7c00;
}
return bitcast(u);
}
int sticky_bit = 0;
if (exp >= -14) {
// normal fp32 to normal fp16
exp = uint16_t(exp + uint16_t(15));
u = uint16_t(((exp & 0x1f) << 10));
u = uint16_t(u | (mantissa >> 13));
} else {
// normal single-precision to subnormal half_t-precision representation
int rshift = (-14 - exp);
if (rshift < 32) {
mantissa |= (1 << 23);
sticky_bit = ((mantissa & ((1 << rshift) - 1)) != 0);
mantissa = (mantissa >> rshift);
u = (uint16_t(mantissa >> 13) & 0x3ff);
} else {
mantissa = 0;
u = 0;
}
}
// round to nearest even
int round_bit = ((mantissa >> 12) & 1);
sticky_bit |= ((mantissa & ((1 << 12) - 1)) != 0);
if ((round_bit && sticky_bit) || (round_bit && (u & 1))) {
u = uint16_t(u + 1);
}
u |= sign;
return bitcast(u);
#endif
}
/// Integer -> FP16 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static half_t convert(int const& n) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__int2half_rn(n));
#else
return convert(float(n));
#endif
}
/// Unsigned integer -> FP16 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static half_t convert(unsigned const& n) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__uint2half_rn(n));
#else
return convert(float(n));
#endif
}
/// Converts a half-precision value stored as a uint16_t to a float
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 530)
// Avoid inlining in device code if no hardware support
__device__ __noinline__
#else
CUTLASS_HOST_DEVICE
#endif
static float convert(half_t const& x) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __half2float(x.to_half());
#else
#if !defined(__CUDA_ARCH__) && CUTLASS_ENABLE_F16C
if( CpuId::instance().is_f16c_supported() ) {
unsigned short u = x.storage;
return _cvtsh_ss(u);
}
#endif
uint16_t const &h = x.storage;
uint32_t sign = ((h >> 15) & 1);
uint32_t exp = ((h >> 10) & 0x1f);
uint32_t mantissa = (h & 0x3ff);
unsigned f = 0;
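// Worked example (illustrative comment): x.storage = 0x3e00 (i.e. half_t(1.5)) gives
// sign = 0, exp = 15, mantissa = 0x200; the normal branch below rebiases exp to
// 15 + 112 = 127 and produces f = (127 << 23) | (0x200 << 13) = 0x3fc00000 = 1.5f.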
if (exp > 0 && exp < 31) {
// normal
exp += 112;
f = (sign << 31) | (exp << 23) | (mantissa << 13);
} else if (exp == 0) {
if (mantissa) {
// subnormal
exp += 113;
while ((mantissa & (1 << 10)) == 0) {
mantissa <<= 1;
exp--;
}
mantissa &= 0x3ff;
f = (sign << 31) | (exp << 23) | (mantissa << 13);
} else {
// sign-preserving zero
f = (sign << 31);
}
} else if (exp == 31) {
if (mantissa) {
f = 0x7fffffff; // not a number
} else {
f = (0xff << 23) | (sign << 31); // inf
}
}
#if defined(__CUDA_ARCH__)
return reinterpret_cast<float const&>(f);
#else
float flt;
std::memcpy(&flt, &f, sizeof(flt));
return flt;
#endif
#endif
}
//
// Methods
//
/// Default constructor
half_t() = default;
/// Reinterpret cast from CUDA's half type
CUTLASS_HOST_DEVICE
explicit half_t(half const & x) {
#if defined(__CUDA_ARCH__)
storage = reinterpret_cast<uint16_t const &>(x);
#else
__half_raw raw(x);
std::memcpy(&storage, &raw.x, sizeof(storage));
#endif
}
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit half_t(float x) {
storage = convert(x).storage;
}
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit half_t(double x): half_t(float(x)) {
}
/// float_e4m3_t conversion
CUTLASS_HOST_DEVICE
explicit half_t(float_e4m3_t x): half_t(float(x)) {
}
/// float_e5m2_t conversion
CUTLASS_HOST_DEVICE
explicit half_t(float_e5m2_t x): half_t(float(x)) {
}
/// Integer conversion - round to nearest even
CUTLASS_HOST_DEVICE
explicit half_t(int x) {
storage = convert(x).storage;
}
/// Unsigned integer conversion - round to nearest even
CUTLASS_HOST_DEVICE
explicit half_t(unsigned x) {
storage = convert(x).storage;
}
/// Assignment
CUTLASS_HOST_DEVICE
half_t & operator=(half const &x) {
#if defined(__CUDA_ARCH__)
storage = reinterpret_cast<uint16_t const &>(x);
#else
__half_raw raw(x);
std::memcpy(&storage, &raw.x, sizeof(storage));
#endif
return *this;
}
/// Converts to float
CUTLASS_HOST_DEVICE
operator float() const {
return convert(*this);
}
/// Converts to double
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(convert(*this));
}
/// Converts to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(convert(*this));
}
/// Casts to bool
CUTLASS_HOST_DEVICE
explicit operator bool() const {
return (convert(*this) != 0.0f);
}
/// Bitcasts to CUDA's half type
CUTLASS_HOST_DEVICE
half to_half() const {
#if defined(__CUDA_ARCH__)
return reinterpret_cast<half const &>(storage);
#else
__half_raw raw;
std::memcpy(&raw.x, &storage, sizeof(raw.x));
return half(raw);
#endif
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint16_t& raw() {
return storage;
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint16_t raw() const {
return storage;
}
/// Returns the sign bit
CUTLASS_HOST_DEVICE
bool signbit() const {
return ((storage & 0x8000) != 0);
}
/// Returns the biased exponent
CUTLASS_HOST_DEVICE
int exponent_biased() const {
return int((storage >> 10) & 0x1f);
}
/// Returns the unbiased exponent
CUTLASS_HOST_DEVICE
int exponent() const {
return exponent_biased() - 15;
}
/// Returns the mantissa
CUTLASS_HOST_DEVICE
int mantissa() const {
return int(storage & 0x3ff);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool signbit(cutlass::half_t const& h) {
return ((h.raw() & 0x8000) != 0);
}
CUTLASS_HOST_DEVICE
cutlass::half_t abs(cutlass::half_t const& h) {
return cutlass::half_t::bitcast(h.raw() & 0x7fff);
}
CUTLASS_HOST_DEVICE
bool isnan(cutlass::half_t const& h) {
return (h.exponent_biased() == 0x1f) && h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isfinite(cutlass::half_t const& h) {
return (h.exponent_biased() != 0x1f);
}
CUTLASS_HOST_DEVICE
cutlass::half_t nanh(const char*) {
// NVIDIA canonical NaN
return cutlass::half_t::bitcast(0x7fff);
}
CUTLASS_HOST_DEVICE
bool isinf(cutlass::half_t const& h) {
return (h.exponent_biased() == 0x1f) && !h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isnormal(cutlass::half_t const& h) {
return h.exponent_biased() && h.exponent_biased() != 0x1f;
}
CUTLASS_HOST_DEVICE
int fpclassify(cutlass::half_t const& h) {
int exp = h.exponent_biased();
int mantissa = h.mantissa();
if (exp == 0x1f) {
if (mantissa) {
return FP_NAN;
}
else {
return FP_INFINITE;
}
}
else if (!exp) {
if (mantissa) {
return FP_SUBNORMAL;
}
else {
return FP_ZERO;
}
}
return FP_NORMAL;
}
CUTLASS_HOST_DEVICE
cutlass::half_t sqrt(cutlass::half_t const& h) {
#if defined(__CUDACC_RTC__)
return cutlass::half_t(sqrtf(float(h)));
#else
return cutlass::half_t(std::sqrt(float(h)));
#endif
}
CUTLASS_HOST_DEVICE
half_t copysign(half_t const& a, half_t const& b) {
uint16_t a_mag = (a.raw() & 0x7fff);
uint16_t b_sign = (b.raw() & 0x8000);
uint16_t result = (a_mag | b_sign);
return half_t::bitcast(result);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Standard Library operations and definitions
//
///////////////////////////////////////////////////////////////////////////////////////////////////
#if !defined(__CUDACC_RTC__)
namespace std {
/// Numeric limits
template <>
struct numeric_limits<cutlass::half_t> {
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_infinity = true;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
static std::float_denorm_style const has_denorm = std::denorm_present;
static bool const has_denorm_loss = true;
static std::float_round_style const round_style = std::round_to_nearest;
static bool const is_iec559 = true;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = 10;
/// Least positive value
static cutlass::half_t min() { return cutlass::half_t::bitcast(0x0001); }
/// Minimum finite value
static cutlass::half_t lowest() { return cutlass::half_t::bitcast(0xfbff); }
/// Maximum finite value
static cutlass::half_t max() { return cutlass::half_t::bitcast(0x7bff); }
/// Returns the machine epsilon (difference between one and the next representable value)
static cutlass::half_t epsilon() { return cutlass::half_t::bitcast(0x1800); }
/// Returns maximum rounding error
static cutlass::half_t round_error() { return cutlass::half_t(0.5f); }
/// Returns positive infinity value
static cutlass::half_t infinity() { return cutlass::half_t::bitcast(0x7c00); }
/// Returns quiet NaN value
static cutlass::half_t quiet_NaN() { return cutlass::half_t::bitcast(0x7fff); }
/// Returns signaling NaN value
static cutlass::half_t signaling_NaN() { return cutlass::half_t::bitcast(0x7fff); }
/// Returns smallest positive subnormal value
static cutlass::half_t denorm_min() { return cutlass::half_t::bitcast(0x0001); }
};
} // namespace std
#endif
namespace cutlass {
namespace platform {
/// Forward Declaration
template <class T>
struct numeric_limits;
/// Numeric limits
template <>
struct numeric_limits<cutlass::half_t> {
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_infinity = true;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
#if !defined(__CUDACC_RTC__)
static std::float_denorm_style const has_denorm = std::denorm_present;
#endif
static bool const has_denorm_loss = true;
#if !defined(__CUDACC_RTC__)
static std::float_round_style const round_style = std::round_to_nearest;
#endif
static bool const is_iec559 = true;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = 10;
/// Least positive value
CUTLASS_HOST_DEVICE
static cutlass::half_t min() { return cutlass::half_t::bitcast(0x0001); }
/// Minimum finite value
CUTLASS_HOST_DEVICE
static cutlass::half_t lowest() { return cutlass::half_t::bitcast(0xfbff); }
/// Maximum finite value
CUTLASS_HOST_DEVICE
static cutlass::half_t max() { return cutlass::half_t::bitcast(0x7bff); }
/// Returns the machine epsilon (difference between one and the next representable value)
CUTLASS_HOST_DEVICE
static cutlass::half_t epsilon() { return cutlass::half_t::bitcast(0x1800); }
/// Returns maximum rounding error
CUTLASS_HOST_DEVICE
static cutlass::half_t round_error() { return cutlass::half_t(0.5f); }
/// Returns positive infinity value
CUTLASS_HOST_DEVICE
static cutlass::half_t infinity() { return cutlass::half_t::bitcast(0x7c00); }
/// Returns quiet NaN value
CUTLASS_HOST_DEVICE
static cutlass::half_t quiet_NaN() { return cutlass::half_t::bitcast(0x7fff); }
/// Returns signaling NaN value
CUTLASS_HOST_DEVICE
static cutlass::half_t signaling_NaN() { return cutlass::half_t::bitcast(0x7fff); }
/// Returns smallest positive subnormal value
CUTLASS_HOST_DEVICE
static cutlass::half_t denorm_min() { return cutlass::half_t::bitcast(0x0001); }
};
} // namespace platform
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Arithmetic operators
//
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool operator==(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __heq(lhs.to_half(), rhs.to_half());
#else
return float(lhs) == float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator!=(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hne(lhs.to_half(), rhs.to_half());
#else
return float(lhs) != float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator<(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hlt(lhs.to_half(), rhs.to_half());
#else
return float(lhs) < float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator<=(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hle(lhs.to_half(), rhs.to_half());
#else
return float(lhs) <= float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator>(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hgt(lhs.to_half(), rhs.to_half());
#else
return float(lhs) > float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator>=(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hge(lhs.to_half(), rhs.to_half());
#else
return float(lhs) >= float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
half_t operator+(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hadd(lhs.to_half(), rhs.to_half()));
#else
return half_t(float(lhs) + float(rhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t operator-(half_t const& lhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hneg(lhs.to_half()));
#else
return half_t(-float(lhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t operator-(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hsub(lhs.to_half(), rhs.to_half()));
#else
return half_t(float(lhs) - float(rhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t operator*(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hmul(lhs.to_half(), rhs.to_half()));
#else
return half_t(float(lhs) * float(rhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t operator/(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hdiv(lhs.to_half(), rhs.to_half()));
#else
return half_t(float(lhs) / float(rhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t& operator+=(half_t & lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hadd(lhs.to_half(), rhs.to_half()));
#else
lhs = half_t(float(lhs) + float(rhs));
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator-=(half_t & lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hsub(lhs.to_half(), rhs.to_half()));
#else
lhs = half_t(float(lhs) - float(rhs));
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator*=(half_t & lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hmul(lhs.to_half(), rhs.to_half()));
#else
lhs = half_t(float(lhs) * float(rhs));
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator/=(half_t & lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hdiv(lhs.to_half(), rhs.to_half()));
#else
lhs = half_t(float(lhs) / float(rhs));
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator++(half_t & lhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hadd(lhs.to_half(), half_t(1.0f).to_half()));
#else
float tmp(lhs);
++tmp;
lhs = half_t(tmp);
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator--(half_t & lhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hsub(lhs.to_half(), half_t(1.0f).to_half()));
#else
float tmp(lhs);
--tmp;
lhs = half_t(tmp);
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t operator++(half_t & lhs, int) {
half_t ret(lhs);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hadd(lhs.to_half(), half_t(1.0f).to_half()));
#else
float tmp(lhs);
tmp++;
lhs = half_t(tmp);
#endif
return ret;
}
CUTLASS_HOST_DEVICE
half_t operator--(half_t & lhs, int) {
half_t ret(lhs);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hsub(lhs.to_half(), half_t(1.0f).to_half()));
#else
float tmp(lhs);
tmp--;
lhs = half_t(tmp);
#endif
return ret;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// User-defined literals
//
CUTLASS_HOST_DEVICE
cutlass::half_t operator "" _hf(long double x) {
return cutlass::half_t(float(x));
}
CUTLASS_HOST_DEVICE
cutlass::half_t operator "" _hf(unsigned long long int x) {
return cutlass::half_t(int(x));
}
///////////////////////////////////////////////////////////////////////////////////////////////////
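// Usage sketch (illustrative addition by the editor, not part of the original header).
// The namespace and function below are hypothetical and exist only to demonstrate the
// conversion and arithmetic API defined above.
namespace cutlass {
namespace example_half_usage {  // hypothetical namespace, illustration only
CUTLASS_HOST_DEVICE
inline float half_round_trip_example(float x) {
  half_t h(x);              // FP32 -> FP16, round-to-nearest-even
  h += half_t(1.0f);        // half-precision HW ops on SM53+, float fallback otherwise
  return float(h);          // FP16 -> FP32
}
} // namespace example_half_usage
} // namespace cutlass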
| include/cutlass/half.h/0 | {
"file_path": "include/cutlass/half.h",
"repo_id": "include",
"token_count": 9174
} | 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a Shape template for matrix tiles
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Describes the size of a matrix tile
template <
int Row_, ///< rows of a matrix
int Column_ ///< columns of a matrix
>
struct MatrixShape {
static int const kRow = Row_; ///< rows of a matrix
static int const kColumn = Column_; ///< columns of a matrix
static int const kCount = Row_ * Column_; ///< total number of elements in a matrix
//
// Static member functions
//
CUTLASS_HOST_DEVICE
static Coord<2> toCoord() {
return make_Coord(kRow, kColumn);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
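// Usage sketch (illustrative addition by the editor, not part of the original header):
// MatrixShape encodes a compile-time tile size; the checks below only demonstrate the
// interface and hold by construction.
static_assert(cutlass::MatrixShape<16, 8>::kRow == 16, "MatrixShape row count");
static_assert(cutlass::MatrixShape<16, 8>::kColumn == 8, "MatrixShape column count");
static_assert(cutlass::MatrixShape<16, 8>::kCount == 16 * 8, "MatrixShape element count");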
| include/cutlass/matrix_shape.h/0 | {
"file_path": "include/cutlass/matrix_shape.h",
"repo_id": "include",
"token_count": 721
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over densely packed tensors in global memory
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/layout/matrix.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_, ///< shape of CTA (concept: MatrixShape)
typename OutputOp_ , ///< output operator (concept: epilogue::thread operator)
typename ReductionOp_, ///< reduction operator (concept: ReductionOperator)
int PartitionsPerStage = 4 ///< number of partitions to issue
>
class ReduceSplitK {
public:
using Shape = Shape_;
using ReductionOp = ReductionOp_;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = OutputOp::kCount;
static int const kPartitionsPerStage = PartitionsPerStage;
using ElementWorkspace = typename ReductionOp::Element;
using ElementAccumulator = typename ReductionOp::ElementAccumulator;
using ElementOutput = typename OutputOp::ElementOutput;
using WorkspaceTensorRef = TensorRef<ElementWorkspace, layout::RowMajor>;
using OutputTensorRef = TensorRef<ElementOutput, layout::RowMajor>;
using StrideIndex = typename WorkspaceTensorRef::Layout::Stride::Index;
using FragmentWorkspace = AlignedArray<ElementWorkspace, kElementsPerAccess>;
using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentOutput = AlignedArray<ElementOutput, kElementsPerAccess>;
//
// Types
//
/// Params structure
struct Params {
MatrixCoord problem_size;
int partitions;
size_t partition_stride;
WorkspaceTensorRef workspace;
OutputTensorRef destination;
OutputTensorRef source;
typename OutputOp::Params output;
typename ReductionOp::Params reduction;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
MatrixCoord problem_size_,
int partitions_,
size_t partition_stride_,
WorkspaceTensorRef workspace_,
OutputTensorRef destination_,
OutputTensorRef source_,
typename OutputOp::Params output_ = typename OutputOp::Params(),
typename ReductionOp::Params reduction_ = typename ReductionOp::Params()
):
problem_size(problem_size_),
partitions(partitions_),
partition_stride(sizeof(FragmentWorkspace) * partition_stride_ / kElementsPerAccess),
workspace(workspace_),
destination(destination_),
source(source_),
output(output_),
reduction(reduction_) {
}
};
struct SharedStorage { };
public:
/// Computes the grid size given a chosen threadblock shape
CUTLASS_HOST_DEVICE
static dim3 grid_shape(
cutlass::MatrixCoord problem_size) {
return dim3(
(problem_size.row() + Shape::kRow - 1) / Shape::kRow,
(problem_size.column() + Shape::kColumn - 1) / Shape::kColumn);
}
/// Determines the threadblock shape
CUTLASS_HOST_DEVICE
static dim3 block_shape() {
return dim3(Shape::kColumn / kElementsPerAccess, Shape::kRow);
}
/// Perform a reduction
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &storage) {
// Determine CTA position
MatrixCoord thread_offset(
MatrixCoord::Index(int(blockIdx.x) * Shape::kRow + threadIdx.y),
MatrixCoord::Index(int(blockIdx.y) * Shape::kColumn + threadIdx.x * kElementsPerAccess)
);
// One guard conditional
if (!(thread_offset.row() < params.problem_size.row() &&
thread_offset.column() < params.problem_size.column())) {
return;
}
ReductionOp reduction_op(params.reduction);
FragmentAccumulator accumulator;
accumulator.clear();
//
// Load the first slice
//
char const *workspace_ptr =
reinterpret_cast<char const *>(
params.workspace.data() + params.workspace.offset(thread_offset));
FragmentWorkspace workspace_frag[kPartitionsPerStage];
//
// Construct the output operator
//
OutputOp output_op(params.output);
//
// Load and accumulate with a simple batched loading sequence.
//
CUTLASS_PRAGMA_NO_UNROLL
for (int k = 0; k < params.partitions; k += kPartitionsPerStage) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPartitionsPerStage; ++i) {
if (k + i < params.partitions) {
workspace_frag[i] = *reinterpret_cast<FragmentWorkspace const *>(workspace_ptr);
workspace_ptr += params.partition_stride;
}
}
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPartitionsPerStage; ++i) {
if (k + i < params.partitions) {
accumulator = reduction_op(accumulator, workspace_frag[i]);
}
}
}
//
// Conditionally load the source
//
FragmentOutput source_frag;
source_frag.clear();
FragmentOutput const *source_ptr = reinterpret_cast<FragmentOutput const *>(
params.source.data() + params.source.offset(thread_offset));
if (output_op.is_source_needed()) {
reinterpret_cast<FragmentOutput &>(source_frag) = *source_ptr;
}
//
// Compute the output
//
typename OutputOp::FragmentOutput output_frag = output_op(accumulator, source_frag);
//
// Store
//
FragmentOutput *dest_ptr = reinterpret_cast<FragmentOutput *>(
params.destination.data() + params.destination.offset(thread_offset));
*dest_ptr = reinterpret_cast<FragmentOutput const &>(output_frag);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace reduction
} // namespace cutlass
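// Launch-shape sketch (illustrative comment added by the editor, not part of the original
// header). Assuming a CTA tile of Shape = MatrixShape<4, 128> reducing a 1024x512
// workspace, the host code would launch:
//
//   dim3 grid  = ReduceSplitK<...>::grid_shape({1024, 512});  // -> (256, 4, 1) CTAs
//   dim3 block = ReduceSplitK<...>::block_shape();            // -> (128 / kElementsPerAccess, 4, 1) threads
//
// Each CTA then strides over params.partitions split-K slices, kPartitionsPerStage at a time.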
| include/cutlass/reduction/kernel/reduce_split_k.h/0 | {
"file_path": "include/cutlass/reduction/kernel/reduce_split_k.h",
"repo_id": "include",
"token_count": 2623
} | 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This defines a "fragment" iterator for visiting the fragments of a warp vector
that participate in one warp-level mma operation.
Typically, this is used to access the scale/bias fragment of a warp-level mma operation.
The scale/bias vector is then partitioned into smaller fragments that can be fed into
the next warp-level mma operation.
This iterator is necessary to accomplish warp-level mma fusion where the scale/bias vector is
applied to the multiplicand for the next mma.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_conversion.h"
namespace cutlass {
namespace transform {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the input fragment tile shape (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
//// Number of elements per access when loading fragment
int ElementsPerAccess>
class VectorFragmentIterator;
// Partial specialization for PitchLinear layout tile
template <
/// Size of the input fragment vector shape (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
//// Number of elements per access when loading fragment
int ElementsPerAccess>
class VectorFragmentIterator<Shape_, Element_,
cutlass::layout::PitchLinear,
InstructionShape_, ElementsPerAccess> {
public:
/// Size of the input threadblock tile shape (concept: MatrixShape)
using Shape = Shape_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::PitchLinear;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Number of participating threads
static int const kThreads = 32;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kRowsPerIteration = 8;
static int const kColumnsPerAccess = 8;
static int const kElementsPerIteration = kRowsPerIteration * InstructionShape::kK / kThreads;
static int const kAccessPerIteration = kElementsPerIteration / kElementsPerAccess;
/// Number of iterations
using Iterations = MatrixShape<InstructionShape::kM / kRowsPerIteration, Shape::kContiguous / kElementsPerIteration>;
public:
//
// Derived quantities
//
// All fragments have kElementsPerAccess scale followed by bias
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one iteration of the iterator.
using Fragment = Array<Element, kElementsPerIteration * Iterations::kRow>;
/// Input threadblock fragment tile
using ThreadblockFragment = Array<Element, Shape::kContiguous >;
private:
/// Internal access type
using AccessType = Array<Element, kElementsPerAccess>;
private:
//
// Data members
//
/// Input threadblock fragment tile
AccessType const *iterator_;
/// Internal index
int index_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
VectorFragmentIterator(ThreadblockFragment const &threadblock_frag)
: iterator_(reinterpret_cast<AccessType const *>(&threadblock_frag)),
index_(0) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
index_ += index_offset;
if(index_ >= Iterations::kColumn)
index_ = 0;
}
/// Increments
CUTLASS_HOST_DEVICE
VectorFragmentIterator &operator++() {
add_offset(1);
return *this;
}
CUTLASS_HOST_DEVICE
void set_index(int idx) {
index_ = idx;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int r = 0; r < Iterations::kRow; r++) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kAccessPerIteration; i++) {
frag_ptr[i * Iterations::kRow + r].clear();
frag_ptr[i * Iterations::kRow + r] = iterator_[index_ * kAccessPerIteration + i];
}
}
}
};
// Partial specialization for Row-Major layout tile
template <
/// Size of the input fragment tile shape (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
//// Number of elements per access when loading fragment
int ElementsPerAccess>
class VectorFragmentIterator<Shape_, Element_,
cutlass::layout::RowMajor,
InstructionShape_, ElementsPerAccess> {
public:
/// Size of the input threadblock tile shape (concept: MatrixShape)
using Shape = Shape_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Underlying iterator
using Base = VectorFragmentIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear, InstructionShape, ElementsPerAccess>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one iteration of the iterator.
using Fragment = typename Base::Fragment;
/// Input threadblock fragment tile
using ThreadblockFragment = typename Base::ThreadblockFragment;
private:
/// Underlying iterator
Base iterator_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
VectorFragmentIterator(ThreadblockFragment const &threadblock_frag)
: iterator_(threadblock_frag) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
iterator_.add_offset(index_offset);
}
/// Increments
CUTLASS_HOST_DEVICE
VectorFragmentIterator &operator++() {
add_offset(1);
return *this;
}
CUTLASS_HOST_DEVICE
void set_index(int idx) {
iterator_.set_index(idx);
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
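// Sizing sketch (illustrative comment added by the editor, not part of the original
// header). For an instruction shape with kM = 16 and kK = 16, a threadblock fragment
// with Shape::kContiguous = 64, and ElementsPerAccess = 2:
//
//   kElementsPerIteration = kRowsPerIteration * kK / kThreads = 8 * 16 / 32 = 4
//   kAccessPerIteration   = kElementsPerIteration / ElementsPerAccess = 2
//   Iterations            = MatrixShape<16 / 8, 64 / 4> = MatrixShape<2, 16>
//
// so each call to load() fills a Fragment of 4 * 2 = 8 elements.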
| include/cutlass/transform/warp/vector_fragment_iterator.h/0 | {
"file_path": "include/cutlass/transform/warp/vector_fragment_iterator.h",
"repo_id": "include",
"token_count": 2738
} | 36 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Base class for Epilogue Visitor Emitter
"""
from cutlass_library import DataTypeTag
from cutlass.backend.evt.ir import TopoVisitorNode, DAGIR
class FusionCallbacks:
def __init__(self, dag_ir: DAGIR, cc: int, emit_CD=True) -> None:
"""
Emit the EVT fusion callbacks
:param dag_ir: the DAG IR holding the epilogue visitor
:param cc: compute capability
:param emit_CD: whether to emit nodes C & D as a part of the fusion callbacks
For Sm90, set emit_CD=False, as Tensor C & D are hardcoded in the collective API
so that their shared memory can be explicitly reused
For Sm89, set emit_CD=True as they are treated as normal AuxLoad & AuxStore nodes.
"""
self.dag_ir = dag_ir
self.emit_CD = emit_CD
self.cc = cc
if self.cc < 90:
self.namespace = "threadblock"
else:
self.namespace = "fusion"
#
# Helper functions
#
def get_visitor_name(self, node: str):
"""
Get the visitor name
"""
meta = self.dag_ir.get_node_meta(node)
if not isinstance(meta, TopoVisitorNode) and self.dag_ir.in_degree(node) > 0:
return f"EVT{meta.name_camel}"
else:
return meta.name_camel
def emit(self):
node_metas = self.dag_ir.node_metas_topological_order()
epilogue_str = ""
# Step 1: emit individual node type decl
# emit the EVT & DAG connector
for meta in node_metas:
if not meta.disabled:
epilogue_str += self.emit_node(meta)
if not self.emit_CD and meta.name == "D":
continue
if isinstance(meta, TopoVisitorNode):
epilogue_str += self.emit_dag(meta)
else:
epilogue_str += self.emit_evt(meta)
# Step 2: post-processing & get callback name
if not self.emit_CD:
if not self.dag_ir.has_node("C"):
epilogue_str += "using ElementC = void;\nusing StrideC = StrideD;\n"
output_node = self.dag_ir.get_all_inputs("D")[0]
# The callback is the src of node D
callback_name = self.get_visitor_name(output_node)
else:
# The callback is the last node in the topological order
callback_name = self.get_visitor_name(node_metas[-1].name)
return epilogue_str, callback_name
def emit_evt(self, node):
if self.dag_ir.in_degree(node.name) == 0:
return ""
evt_tmp = f"""
using EVT{node.name_camel} = cutlass::epilogue::{self.namespace}::Sm{self.cc}EVT<
{node.name_camel},
"""
sorted_children = self.dag_ir.get_all_inputs(node.name)
evt_node_strs = [f" {self.get_visitor_name(child_name)}" for child_name in sorted_children]
evt_tmp += ",\n".join(evt_node_strs) + ">;\n"
return evt_tmp
def emit_dag(self, node):
subgraph = node.subgraph
subgraph_nodes = subgraph.nodes_topological_order()
# Emit the Edge Tuple
edge_tuples = "cute::tuple<\n"
for n in subgraph_nodes[:-1]:
in_edges = subgraph.in_edges(n)
edge_weights = [subgraph.get_edge_weight(edge[0], edge[1]) for edge in in_edges]
sorted_children = [edge[0] for _, edge in sorted(zip(edge_weights, in_edges))]
edge_tuple = " cute::seq<"
edge_str = [str(subgraph_nodes.index(child)) for child in sorted_children]
edge_tuple += ", ".join(edge_str) + ">,\n"
edge_tuples += edge_tuple
edge_tuples += " >"
# Emit the node list
dag_nodes = ""
dag_node_strs = []
for n in subgraph_nodes[:-1]:
n_meta = subgraph.get_node_meta(n)
if n_meta.disabled:
dag_node_strs.append(f" {self.get_visitor_name(n)}")
else:
dag_node_strs.append(f" {n_meta.name_camel}")
dag_nodes = ",\n".join(dag_node_strs)
return f"""
using {node.name_camel} = cutlass::epilogue::{self.namespace}::Sm{self.cc}TopologicalVisitor<
{DataTypeTag[node.subgraph.element_compute]},
{edge_tuples},
{dag_nodes}
>;
"""
def emit_node(self, node):
if isinstance(node, TopoVisitorNode):
emission = ""
for node in node.subgraph.node_metas_topological_order():
if not node.disabled:
emission += self.emit_node(node)
return emission
else:
return node.underlying_impl.type_decl
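# Usage sketch (illustrative addition by the editor, not part of the original module).
# The helper below is hypothetical; it only shows the intended driving sequence: build
# FusionCallbacks from a DAG IR and a compute capability, then emit() returns the C++
# type declarations together with the name of the top-level callback type.
def _example_emit_fusion_callbacks(dag_ir: DAGIR, cc: int):
    # Sm90 collectives hardcode tensors C & D, so they are excluded from the callbacks
    callbacks = FusionCallbacks(dag_ir, cc, emit_CD=(cc < 90))
    type_decls, callback_name = callbacks.emit()
    return type_decls, callback_name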
| python/cutlass/backend/evt/backend/emitter_base.py/0 | {
"file_path": "python/cutlass/backend/evt/backend/emitter_base.py",
"repo_id": "python",
"token_count": 2763
} | 37 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Store node and implementations
"""
import ctypes
from cutlass_library import DataType
from cutlass.backend.c_types import tuple_factory
from cutlass.backend.epilogue import dtype2ctype, to_ctype_value
from cutlass.backend.evt.ir.node import NodeBase, ImplBase, NoOpImpl
from cutlass.backend.evt.ir.tensor import Tensor
from cutlass.backend.library import FloatRoundStyle, FunctionalOp
class StoreImplBase(ImplBase):
"""
Base class for store node implementation
"""
reserved_names = ["D"]
def __init__(self, node) -> None:
super().__init__(node)
self.element = node.element
self.element_output = node.element_output
self.stride = node.store_tensor.stride
class StoreDImpl(StoreImplBase):
"""
Store D implementation
"""
@property
def argument_type_d(self):
stride_mnl = self.get_stride_mnl()
tuple_type = tuple_factory(stride_mnl, self.stride_dtype)
class _Argument(ctypes.Structure):
_fields_ = [
("ptr_D", ctypes.c_void_p),
("stride_D", tuple_type)
]
def __init__(self, ptr: int) -> None:
self.ptr_D = ptr
self.stride_D = tuple_type(stride_mnl)
return _Argument
@staticmethod
def match(node, problem_size: tuple):
if node.name == "D" and node.store_tensor.shape == problem_size:
return True
return False
class AuxStoreImpl(StoreImplBase):
def __init__(self, node) -> None:
super().__init__(node)
self.round_style = FloatRoundStyle.ToNearest
@property
def argument_type(self):
stride_mnl = self.get_stride_mnl()
name = self.name
tuple_type = tuple_factory(stride_mnl, self.stride_dtype)
class _Argument(ctypes.Structure):
_fields_ = [
("ptr_aux", ctypes.c_void_p),
("dAux", tuple_type)
]
def __init__(self, kwargs) -> None:
ptr = kwargs[name]
self.ptr_aux = ptr
self.dAux = tuple_type(stride_mnl)
return _Argument
@staticmethod
def match(node, problem_size: tuple):
if not node.is_output:
return False
if node.name in StoreImplBase.reserved_names:
return False
strideMN = node.store_tensor.stride[-2:]
if (strideMN[0] == 1 and strideMN[1] != 0 or
strideMN[0] != 0 and strideMN[1] == 1 ):
return True
else:
return False
class ReductionImplBase(StoreImplBase):
def __init__(self, node) -> None:
super().__init__(node)
self.element = node.store_tensor.element
self.element_compute = node.element_compute
self.reg_reduce_fn = self.node.reg_reduce_fn
self.gmem_reduce_fn = self.node.gmem_reduce_fn
self.round_style = node.round_style
self.stride_dtype = "int"
def get_reduce_identity(self):
"""
Return the reduction identity of the current reduce_fn
"""
maxes = {
DataType.f32: (2 ** 31) - 1,
DataType.f16: (2 ** 15),
DataType.s32: (2 ** 31) - 1,
DataType.s8: (2 ** 7) - 1
}
mins = {
DataType.f32: -maxes[DataType.f32],
DataType.f16: -maxes[DataType.f16],
DataType.s32: -maxes[DataType.s32],
DataType.s8: -maxes[DataType.s8]
}
if self.reg_reduce_fn == FunctionalOp.Maximum:
if self.element_compute not in mins:
raise Exception(f"No min entry for data type {self.element_compute}")
return to_ctype_value(mins[self.element_compute], self.element_compute)
elif self.reg_reduce_fn == FunctionalOp.Multiplies:
return to_ctype_value(1., self.element_compute)
elif self.reg_reduce_fn == FunctionalOp.Minimum:
if self.element_compute not in maxes:
raise Exception(f"No max entry for data type {self.element_compute}")
return to_ctype_value(maxes[self.element_compute], self.element_compute)
else:
return to_ctype_value(0., self.element_compute)
@property
def argument_type(self):
self.get_reduce_identity()
stride_mnl = self.get_stride_mnl()
name = self.name
tuple_type = tuple_factory(stride_mnl, self.stride_dtype)
element_compute = self.element_compute
reduce_identity = self.get_reduce_identity()
class _Argument(ctypes.Structure):
_fields_ = [
("ptr", ctypes.c_void_p),
("reduce_identity", dtype2ctype[element_compute]),
("dMNL", tuple_type)
]
def __init__(self, kwargs) -> None:
ptr = kwargs[name]
self.ptr = ptr
self.reduce_identity = reduce_identity
self.dMNL = tuple_type(stride_mnl)
return _Argument
class ColumnReductionImpl(ReductionImplBase):
@staticmethod
def match(node, problem_size: tuple):
if not node.is_output:
return False
if node.name in StoreImplBase.reserved_names:
return False
strideMN = node.store_tensor.stride[-2:]
if strideMN == (1, 0):
return True
else:
return False
class RowReductionImpl(ReductionImplBase):
@staticmethod
def match(node, problem_size: tuple):
if not node.is_output:
return False
if node.name in StoreImplBase.reserved_names:
return False
strideMN = node.store_tensor.stride[-2:]
if strideMN == (0, 1):
return True
else:
return False
class ScalarReductionImpl(ReductionImplBase):
@staticmethod
def match(node, problem_size: tuple):
if not node.is_output:
return False
if node.name in StoreImplBase.reserved_names:
return False
strideMN = node.store_tensor.stride[-2:]
if strideMN == (0, 0):
return True
else:
return False
class StoreNode(NodeBase):
"""
Store node
"""
possible_impls = [
AuxStoreImpl, RowReductionImpl,
ColumnReductionImpl, ScalarReductionImpl,
NoOpImpl, StoreDImpl
]
def __init__(self, name: str) -> None:
super().__init__(name)
self.op = "store"
self.is_output = False
self._store_tensor = None
@property
def store_tensor(self) -> Tensor:
"""
Return the output tensor (concept: cutlass.backend.evt.ir.tensor)
"""
return self._store_tensor
@store_tensor.setter
def store_tensor(self, kwargs):
"""
Setting the tensor
"""
self._store_tensor = Tensor(**kwargs)
def type_propagation(self, input_node_metas: 'list[NodeBase]'):
"""
The store nodes has element_output = element_input
"""
if self.is_output:
if self.store_tensor is None:
raise RuntimeError(f"The store tensor of node {self.name} is unknown.")
self.element = self.store_tensor.element
assert len(input_node_metas) == 1, "Store node can only have one input node"
self.element_output = input_node_metas[0].element_output
def broadcast_propagation(self, input_node_metas: 'list[NodeBase]'):
super().broadcast_propagation(input_node_metas)
if self.is_output:
self._store_tensor.broadcast(self.tensor.shape)
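# Implementation-matching sketch (illustrative comment added by the editor, not part of
# the original module). For an output StoreNode, the stride of the last two (M, N) modes
# of its store tensor selects the implementation during matching:
#
#   (1, *) or (*, 1)  -> AuxStoreImpl          (general strided auxiliary output)
#   (1, 0)            -> ColumnReductionImpl
#   (0, 1)            -> RowReductionImpl
#   (0, 0)            -> ScalarReductionImpl
#
# The reserved name "D" with a tensor matching the problem size maps to StoreDImpl instead.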
| python/cutlass/backend/evt/ir/store_nodes.py/0 | {
"file_path": "python/cutlass/backend/evt/ir/store_nodes.py",
"repo_id": "python",
"token_count": 4087
} | 38 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import copy
import ctypes
import enum
from cuda import cuda, cudart
from cutlass_library import SubstituteTemplate
import numpy as np
from cutlass_library import (
ComplexTransformTag,
DataType,
DataTypeNames,
DataTypeSize,
DataTypeTag,
EpilogueScheduleSuffixes,
EpilogueScheduleTag,
EpilogueScheduleType,
GemmKind,
GemmKindNames,
GemmUniversalMode,
KernelScheduleSuffixes,
KernelScheduleTag,
KernelScheduleType,
LayoutTag,
LayoutType,
MathOperation,
MathOperationTag,
OpcodeClass,
OpcodeClassNames,
OpcodeClassTag,
OperationKind,
ShortComplexLayoutNames,
ShortDataTypeNames,
ShortLayoutTypeNames,
SwizzlingFunctor,
SwizzlingFunctorTag,
TileSchedulerSuffixes,
TileSchedulerTag,
TileSchedulerType,
get_complex_from_real
)
from cutlass.backend.arguments import ArgumentBase
from cutlass.backend.c_types import (
GemmCoord_,
GemmCoordBatched_,
GenericMainloopArguments3x_,
StrideBatched_,
dim3_,
get_gemm_arguments,
get_gemm_arguments_3x,
get_gemm_arguments_streamk,
get_gemm_grouped_arguments,
get_mainloop_arguments_3x,
get_tile_scheduler_arguments_3x,
)
from cutlass.backend.library import (
ApiVersion,
EmissionType,
SchedulerMode,
SchedulerModeTag,
TensorDescription,
TileDescription,
api_version,
)
from cutlass.backend.memory_manager import device_mem_alloc, todevice
from cutlass.backend.operation import ExecutableOperation, LaunchConfiguration
from cutlass.backend.type_hint import GemmOperation, Tensor
from cutlass.backend.utils.device import device_sm_count
from cutlass.shape import GemmCoord, MatrixCoord
################################################################################
#
# Data structure modeling a GEMM operation
#
################################################################################
def leading_dimension(layout: LayoutType, shape: MatrixCoord) -> int:
"""
Returns the leading dimension of a tensor with layout ``layout`` and shape ``shape``.
:param layout: layout of the tensor
:type layout: cutlass.shape.LayoutType
:param shape: shape of the tensor
:type shape: cutlass.shape.MatrixCoord
:return: leading dimension of the tensor
:rtype: int
"""
if layout == LayoutType.RowMajor:
return shape.column
elif layout == LayoutType.ColumnMajor:
return shape.row
def transpose_layout(layout: LayoutType) -> LayoutType:
if layout == LayoutType.ColumnMajor:
return LayoutType.RowMajor
elif layout == LayoutType.RowMajor:
return LayoutType.ColumnMajor
else:
raise ValueError(f"Unsupported Layout {layout}")
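# Worked examples (illustrative comment added by the editor, not part of the original
# module): for a 128x64 tensor,
#   leading_dimension(LayoutType.RowMajor,    MatrixCoord(128, 64)) -> 64   (ld = columns)
#   leading_dimension(LayoutType.ColumnMajor, MatrixCoord(128, 64)) -> 128  (ld = rows)
#   transpose_layout(LayoutType.RowMajor)                           -> LayoutType.ColumnMajor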
class GemmArguments2x(ArgumentBase):
"""
Argument wrapper for GEMM in CUTLASS 2. It encodes problem information and
user-provided tensors into the kernel's arguments
:param operation: the GEMM operation to take the argument
:type operation: :class:`cutlass.backend.GemmOperationUniversal` |
:class:`cutlass.backend.GemmOperationGrouped`
:param problem_size: GEMM problem size gemm(M, N, K)
:type problem_size: :class:`cutlass.shape.GemmCoord`
:param A: tensor A
:type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param B: tensor B
:type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param C: tensor C
:type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param D: tensor D
:type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param gemm_mode: GEMM mode
:type gemm_mode: :class:`cutlass_library.GemmUniversalMode`
:param output_op: output operator, optional
:type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments`
:param stream: cuda stream, defaults to cuda.cuda.CUstream(0)
:type stream: :class:`cuda.cuda.CUstream`
"""
def __init__(self, operation, problem_size, A, B, C, D, gemm_mode=GemmUniversalMode.Gemm, **kwargs):
self.operation = operation
self.layout_A = operation.A.layout
self.layout_B = operation.B.layout
self.layout_C = operation.C.layout
self.element_A = operation.A.element
self.element_B = operation.B.element
self.element_C = operation.C.element
if operation.C.layout in [LayoutType.RowMajorInterleaved32, LayoutType.ColumnMajorInterleaved32]:
raise Exception("Interleaved layout not currently supported")
if hasattr(self.operation.epilogue_functor, "visitor") and operation.arch != 90:
super().__init__(A, B, None, None, **kwargs)
else:
super().__init__(A, B, C, D, **kwargs)
if operation.switched:
self.problem_size = GemmCoord(problem_size.n, problem_size.m, problem_size.k)
self.ptr_A, self.ptr_B = self.ptr_B, self.ptr_A
else:
self.problem_size = problem_size
# If the number of elements in C = problem_size.n, C is treated as the bias
if hasattr(self, "tensor_c_numel"):
if self.tensor_c_numel == self.problem_size.n and self.problem_size.m != 1:
self.bias = True
self.lda = leading_dimension(self.layout_A, self.problem_size.mk)
self.ldb = leading_dimension(self.layout_B, self.problem_size.kn)
self.ldc = leading_dimension(self.layout_C, self.problem_size.mn)
self.ldd = self.ldc
if self.bias:
self.ldc = 0
if "output_op" in kwargs.keys() and gemm_mode != GemmUniversalMode.GemmSplitKParallel:
self.output_op = kwargs["output_op"]
else:
if self.operation.epilogue_functor.element_epilogue in [DataType.s8, DataType.s32, DataType.u8, DataType.u32]:
dtype = int
else:
dtype = float
self.output_op = self.operation.epilogue_type(dtype(1.0), dtype(0.0))
self.gemm_mode = gemm_mode
if gemm_mode in [GemmUniversalMode.Gemm, GemmUniversalMode.GemmSplitKParallel]:
if "split_k_slices" in kwargs.keys():
self.batch_count = kwargs["split_k_slices"]
else:
self.batch_count = 1
self.split_k_slices = self.batch_count
if gemm_mode in [GemmUniversalMode.Batched, GemmUniversalMode.Array]:
if "batch" in kwargs.keys():
self.batch_count = kwargs["batch"]
else:
self.batch_count = 1
if "batch_strides" in kwargs:
self.batched_stride_A = kwargs["batch_strides"]["A"]
self.batched_stride_B = kwargs["batch_strides"]["B"]
self.batched_stride_C = kwargs["batch_strides"]["C"]
self.batched_stride_D = kwargs["batch_strides"]["D"]
else:
self.batched_stride_A = self.problem_size.m * self.problem_size.k
self.batched_stride_B = self.problem_size.n * self.problem_size.k
self.batched_stride_C = self.problem_size.m * self.problem_size.n
self.batched_stride_D = self.problem_size.m * self.problem_size.n
if self.bias:
self.batched_stride_C = self.problem_size.n
if gemm_mode == GemmUniversalMode.Array:
self.ptr_A_array = []
self.ptr_B_array = []
self.ptr_C_array = []
self.ptr_D_array = []
ptr_A_addr = int(self.ptr_A)
ptr_B_addr = int(self.ptr_B)
ptr_C_addr = int(self.ptr_C)
ptr_D_addr = int(self.ptr_D)
stride_A = self.batched_stride_A * DataTypeSize[self.element_A] // 8
stride_B = self.batched_stride_B * DataTypeSize[self.element_B] // 8
stride_C = self.batched_stride_C * DataTypeSize[self.element_C] // 8
stride_D = self.batched_stride_D * DataTypeSize[self.element_C] // 8
for _ in range(self.batch_count):
self.ptr_A_array.append(ptr_A_addr)
self.ptr_B_array.append(ptr_B_addr)
self.ptr_C_array.append(ptr_C_addr)
self.ptr_D_array.append(ptr_D_addr)
ptr_A_addr += stride_A
ptr_B_addr += stride_B
ptr_C_addr += stride_C
ptr_D_addr += stride_D
self.ptr_A_array_buffer = todevice(self.ptr_A_array, dtype=np.int64)
self.ptr_B_array_buffer = todevice(self.ptr_B_array, dtype=np.int64)
self.ptr_C_array_buffer = todevice(self.ptr_C_array, dtype=np.int64)
self.ptr_D_array_buffer = todevice(self.ptr_D_array, dtype=np.int64)
if isinstance(self.operation, GemmOperationUniversal):
self.initialize()
def get_arguments(self):
problem_size_ = self.problem_size.ctype
grid_tiled_shape_ = GemmCoord(
self.grid_tiled_shape.x,
self.grid_tiled_shape.y,
self.grid_tiled_shape.z).ctype
if self.gemm_mode == GemmUniversalMode.Array:
arguments = self.operation.argument_type(
# Arguments from UniversalArgumentsBase
self.gemm_mode,
problem_size_,
self.batch_count,
0,
# Remaining arguments
self.output_op,
int(self.ptr_A_array_buffer.ptr),
int(self.ptr_B_array_buffer.ptr),
int(self.ptr_C_array_buffer.ptr),
int(self.ptr_D_array_buffer.ptr),
0, 0, 0,
self.lda, self.ldb, self.ldc, self.ldd,
self.lda, self.ldb, self.ldc, self.ldd,
0, 0, 0
)
else:
arguments = self.operation.argument_type(
# Arguments from UniversalArgumentsBase
self.gemm_mode, problem_size_, self.batch_count, self.batched_stride_D,
# Remaining arguments
self.output_op,
int(self.ptr_A),
int(self.ptr_B),
int(self.ptr_C),
int(self.ptr_D),
self.batched_stride_A,
self.batched_stride_B,
self.batched_stride_C,
self.lda, self.ldb, self.ldc, self.ldd,
self.lda, self.ldb, self.ldc, self.ldd,
0, 0, 0
)
self.arguments = arguments, grid_tiled_shape_, self.gemm_k_size
def initialize(self):
launch_config = self.operation.rt_module.plan(self)
# Get the host and device workspace
device_workspace_size = self.operation.rt_module.get_device_workspace_size(self)
if device_workspace_size > 0:
self.workspace_buffer = device_mem_alloc(device_workspace_size)
workspace_ptr = self.workspace_buffer.ptr
err, = cuda.cuMemsetD32(
workspace_ptr, 0, device_workspace_size // 4)
else:
workspace_ptr = None
device_workspace = 0
if workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.GemmSplitKParallel:
# In GEMM split-K parallel, the D pointer is redirected to the workspace
self.ptr_D = cuda.CUdeviceptr(workspace_ptr)
elif workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.Gemm:
device_workspace = workspace_ptr
self.get_arguments()
arguments, grid_tiled_shape, gemm_k_size = self.arguments
res_arg = self.operation.rt_module.get_args(
ctypes.byref(arguments), ctypes.c_void_p(int(device_workspace)))
host_workspace = bytearray(res_arg.contents)
device_workspace = None
self.host_workspace = host_workspace
self.device_workspace = device_workspace
self.launch_config = launch_config
def sync(self, stream_sync=True):
super().sync(stream_sync)
if hasattr(self.output_op, "sync"):
self.output_op.sync()
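# Illustrative sketch (not used by the runtime): how GemmUniversalMode.Array
# expands a single base device pointer into one pointer per batch, mirroring
# the loop in GemmArguments2x.__init__ above. The base address, stride, and
# element width below are hypothetical example values.
def _demo_array_mode_pointer_offsets(base_ptr=0x7F0000000000,
                                     batched_stride=128 * 64,
                                     element_bits=16,
                                     batch_count=4):
    stride_bytes = batched_stride * element_bits // 8
    # One raw address per batch; these are the values packed into the int64
    # pointer-array buffer that is handed to the kernel.
    return [base_ptr + b * stride_bytes for b in range(batch_count)]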
class GemmArguments2xStreamK(GemmArguments2x):
"""
Argument wrapper for stream-K GEMMs in CUTLASS 2. It encodes problem information and
user-provided tensors into the kernel's arguments.
:param operation: the GEMM operation to take the argument
:type operation: :class:`cutlass.backend.GemmOperationUniversal` |
:class:`cutlass.backend.GemmOperationGrouped`
:param problem_size: GEMM problem size gemm(M, N, K)
:type problem_size: :class:`cutlass.shape.GemmCoord`
:param A: tensor A
:type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param B: tensor B
:type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param C: tensor C
:type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param D: tensor D
:type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param gemm_mode: GEMM mode
:type gemm_mode: :class:`cutlass_library.GemmUniversalMode`
:param output_op: output operator, optional
:type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments`
"""
def __init__(self, operation, problem_size, A, B, C, D, gemm_mode=GemmUniversalMode.Gemm, **kwargs):
if gemm_mode not in [GemmUniversalMode.Gemm, GemmUniversalMode.Batched]:
raise Exception(f"Unsupported GEMM mode {gemm_mode}.")
super().__init__(operation, problem_size, A, B, C, D, gemm_mode, **kwargs)
def get_arguments(self):
batch_stride_A = self.problem_size.m * self.problem_size.k
batch_stride_B = self.problem_size.k * self.problem_size.n
batch_stride_C = self.problem_size.m * self.problem_size.n
batch_stride_D = self.problem_size.m * self.problem_size.n
arguments = self.operation.argument_type(
self.gemm_mode,
GemmCoord_(self.problem_size.m, self.problem_size.n, self.problem_size.k),
self.batch_count,
self.output_op,
int(self.ptr_A),
int(self.ptr_B),
int(self.ptr_C),
int(self.ptr_D),
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D,
self.lda, self.ldb, self.ldc, self.ldd, # strides
self.lda, self.ldb, self.ldc, self.ldd,
-1, # avail_sms
)
return arguments
def initialize(self):
# Get the host and device workspace
device_workspace_size = self.operation.rt_module.get_device_workspace_size(
self,
device_sm_count(),
self.operation.rt_module.occupancy
)
if device_workspace_size > 0:
self.workspace_buffer = device_mem_alloc(device_workspace_size)
workspace_ptr = self.workspace_buffer.ptr
err, = cuda.cuMemsetD32(
workspace_ptr, 0, device_workspace_size // 4)
else:
workspace_ptr = None
device_workspace = 0
if workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.GemmSplitKParallel:
# In GEMM split-K parallel, the D pointer is redirected to the workspace
self.ptr_D = cuda.CUdeviceptr(workspace_ptr)
elif workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.Gemm:
device_workspace = workspace_ptr
arguments = self.get_arguments()
res_arg = self.operation.rt_module.get_args(
ctypes.byref(arguments),
ctypes.c_void_p(int(device_workspace)),
device_sm_count(),
self.operation.rt_module.occupancy
)
host_workspace = bytearray(res_arg.contents)
grid = self.operation.rt_module.get_grid_shape(
ctypes.byref(arguments),
device_sm_count(),
self.operation.rt_module.occupancy
)
device_workspace = None
self.host_workspace = host_workspace
self.device_workspace = device_workspace
self.launch_config = LaunchConfiguration(
[grid.m, grid.n, grid.k],
[self.operation.rt_module.threads, 1, 1],
self.operation.rt_module.shared_memory_capacity
)
class GemmArguments3x(GemmArguments2x):
"""
Argument wrapper for GEMM in CUTLASS 3. It encodes problem information and
user-provided tensors into the kernel's arguments.
:param operation: the GEMM operation to take the argument
:type operation: :class:`cutlass.backend.GemmOperationUniversal` |
:class:`cutlass.backend.GemmOperationGrouped`
:param problem_size: GEMM problem size gemm(M, N, K)
:type problem_size: :class:`cutlass.shape.GemmCoord`
:param A: tensor A
:type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param B: tensor B
:type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param C: tensor C
:type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param D: tensor D
:type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param gemm_mode: GEMM mode
:type gemm_mode: :class:`cutlass_library.GemmUniversalMode`
:param output_op: output operator, optional
:type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments`
"""
def __init__(self, operation, problem_size, A, B, C, D, gemm_mode=GemmUniversalMode.Gemm, **kwargs):
if gemm_mode not in [GemmUniversalMode.Gemm, GemmUniversalMode.Batched]:
raise Exception(f"Unsupported GEMM mode {gemm_mode}.")
super().__init__(operation, problem_size, A, B, C, D, gemm_mode, **kwargs)
def get_arguments(self):
mainloop_args = get_mainloop_arguments_3x(
self.operation.tile_description.kernel_schedule,
self.operation.A.element,
self.operation.B.element,
self.operation.A.alignment,
self.operation.B.alignment
)
scheduler_args = get_tile_scheduler_arguments_3x(self.operation.tile_description.tile_scheduler)
uses_default_epilogue = self.operation.rt_module.uses_default_epilogue()
argument_type, epilogue_args, epilogue_type, hw_info = get_gemm_arguments_3x(
mainloop_args, self.operation.epilogue_functor, scheduler_args, uses_default_epilogue)
problem_size_ = GemmCoordBatched_(self.problem_size, self.batch_count)
if self.batch_count > 1:
bsA = self.batched_stride_A
bsB = self.batched_stride_B
bsC = self.batched_stride_C
bsD = self.batched_stride_D
else:
bsA = 0
bsB = 0
bsC = 0
bsD = 0
stride_A = StrideBatched_(self.lda, bsA)
stride_B = StrideBatched_(self.ldb, bsB)
stride_C = StrideBatched_(self.ldc, bsC)
stride_D = StrideBatched_(self.ldd, bsD)
# Superset of potential mainloop arguments
generic_args = GenericMainloopArguments3x_(
int(self.ptr_A),
stride_A,
int(self.ptr_B),
stride_B,
4 # mma_promotion_interval
)
# Set of mainloop arguments needed for this kernel
mainloop = mainloop_args.from_generic_mainloop_args(generic_args)
if not uses_default_epilogue and hasattr(self.output_op, "to_evt_params"):
self.output_op = self.output_op.to_evt_params()
epilogue = epilogue_args(
self.output_op,
int(self.ptr_C),
stride_C,
int(self.ptr_D),
stride_D,
)
# Set hardware info
hw_info_ = hw_info(
0, device_sm_count(),
)
self.arguments = argument_type(
int(self.gemm_mode),
problem_size_,
mainloop,
epilogue,
hw_info_,
scheduler_args
)
return self.arguments
def initialize(self):
# Get the host and device workspace
device_workspace_size = self.operation.rt_module.get_device_workspace_size(self)
if device_workspace_size > 0:
self.workspace_buffer = device_mem_alloc(device_workspace_size)
workspace_ptr = self.workspace_buffer.ptr
err, = cuda.cuMemsetD32(
workspace_ptr, 0, device_workspace_size // 4)
else:
workspace_ptr = None
device_workspace = 0
if workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.GemmSplitKParallel:
# In GEMM split-K parallel, the D pointer is redirected to the workspace
self.ptr_D = cuda.CUdeviceptr(workspace_ptr)
elif workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.Gemm:
device_workspace = workspace_ptr
self.get_arguments()
res_arg = self.operation.rt_module.get_args(
ctypes.byref(self.arguments),
ctypes.c_void_p(int(device_workspace)),
)
host_workspace = bytearray(res_arg.contents)
grid = self.operation.rt_module.get_grid_shape(
ctypes.byref(self.arguments),
ctypes.c_void_p(int(device_workspace)),
)
block = self.operation.rt_module.get_block_shape()
device_workspace = None
self.host_workspace = host_workspace
self.device_workspace = device_workspace
self.launch_config = LaunchConfiguration(
[grid.x, grid.y, grid.z],
[block.x, block.y, block.z],
self.operation.rt_module.shared_memory_capacity,
)
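# Illustrative sketch (not part of the launch path): GemmArguments3x above
# pairs each leading dimension with a batch stride, and zeroes the batch
# strides when there is only a single batch. The default per-batch strides
# follow the element counts used in GemmArguments2x (A: m*k, B: n*k, C/D: m*n).
# All sizes here are hypothetical example values.
def _demo_3x_stride_pairs(m=128, n=256, k=64, lda=64, ldb=64, ldcd=256, batch_count=1):
    bs_a, bs_b, bs_cd = m * k, n * k, m * n
    if batch_count <= 1:
        bs_a = bs_b = bs_cd = 0  # single batch: the batch stride contributes nothing
    return {"A": (lda, bs_a), "B": (ldb, bs_b), "C": (ldcd, bs_cd), "D": (ldcd, bs_cd)}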
def GemmArguments(operation, problem_size, A, B, C, D, gemm_mode=GemmUniversalMode.Gemm, **kwargs):
"""
Argument wrapper for GEMM in CUTLASS 2 or 3. It returns either 2.x or 3.x arguments
depending on the API version of `operation`, and selects the stream-K variant when
the stream-K swizzling functor is used.
:param operation: the GEMM operation to take the argument
:type operation: :class:`cutlass.backend.GemmOperationUniversal` |
:class:`cutlass.backend.GemmOperationGrouped`
:param problem_size: GEMM problem size gemm(M, N, K)
:type problem_size: :class:`cutlass.shape.GemmCoord`
:param A: tensor A
:type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param B: tensor B
:type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param C: tensor C
:type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param D: tensor D
:type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param gemm_mode: GEMM mode
:type gemm_mode: :class:`cutlass_library.GemmUniversalMode`
:param output_op: output operator, optional
:type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments`
"""
if operation.swizzling_functor == SwizzlingFunctor.StreamK:
if operation.api == ApiVersion.v3x:
raise Exception("Stream K is currently only supported in CUTLASS 2.x")
ArgClass = GemmArguments2xStreamK
else:
ArgClass = GemmArguments3x if operation.api == ApiVersion.v3x else GemmArguments2x
return ArgClass(operation, problem_size, A, B, C, D, gemm_mode, **kwargs)
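# Hedged usage sketch: given an already-constructed and compiled
# GemmOperationUniversal `op` (hypothetical here) and device-resident tensors
# A, B, C, D, the factory above picks the matching argument class and the
# operation can then be launched. This mirrors the intended call pattern and
# is not executed by this module.
def _demo_gemm_arguments_usage(op, A, B, C, D, m=128, n=128, k=64):
    args = GemmArguments(
        operation=op,
        problem_size=GemmCoord(m, n, k),
        A=A, B=B, C=C, D=D,
        gemm_mode=GemmUniversalMode.Gemm,
    )
    op.run(args)   # launches the kernel with the packed host workspace
    args.sync()    # blocks until the result in D is ready
    return args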
class GemmGroupedArguments:
"""
Argument wrapper for GEMM Grouped. It encodes problem information and
user-provided tensors into the kernel's arguments.
:param operation: the GEMM Grouped operation to take the argument
:type operation: :class:`cutlass.backend.GemmOperationGrouped`
:param problem_sizes: list of GEMM problem sizes gemm(M, N, K)
:type problem_sizes: list[:class:`cutlass.shape.GemmCoord`]
:param A: list of tensor A
:type A: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray]
:param B: list of tensor B
:type B: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray]
:param C: list of tensor C
:type C: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray]
:param D: list of tensor D
:type D: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray]
:param output_op: output operator, optional
:type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments`
:param stream: cuda stream, defaults to cuda.cuda.CUstream(0)
:type stream: :class:`cuda.cuda.CUstream`
"""
def __init__(self, operation, problem_sizes, A, B, C, D, **kwargs):
# Get number of problems in the group
self.problem_count = len(problem_sizes)
# Check the input arguments
assert len(A) == self.problem_count
assert len(B) == self.problem_count
assert len(C) == self.problem_count
assert len(D) == self.problem_count
problem_size_host = []
self.ptr_A_host = []
self.ptr_B_host = []
self.ptr_C_host = []
self.ptr_D_host = []
lda_host = []
ldb_host = []
ldc_host = []
ldd_host = []
self.partitions = 1
self.operation = operation
# Get the threadblock
threadblock_shape = operation.tile_description.threadblock_shape
self.threadblock_shape = GemmCoord(
threadblock_shape[0],
threadblock_shape[1],
threadblock_shape[2],
)
self.threadblock_swizzle = operation.swizzling_functor
self.total_tiles = 0
self.gemm_arguments = []
self.stream = kwargs.get("stream", cuda.CUstream(0))
# Process the input arguments
for idx, problem_size in enumerate(problem_sizes):
M, N, K = problem_size.m, problem_size.n, problem_size.k
temp_argument = GemmArguments2x(
operation=operation,
problem_size=GemmCoord(M, N, K),
A=A[idx], B=B[idx], C=C[idx], D=D[idx])
self.gemm_arguments.append(temp_argument)
problem_size_host.append(
[temp_argument.problem_size.m,
temp_argument.problem_size.n,
temp_argument.problem_size.k]
)
self.ptr_A_host.append(int(temp_argument.ptr_A))
lda_host.append(temp_argument.lda)
self.ptr_B_host.append(int(temp_argument.ptr_B))
ldb_host.append(temp_argument.ldb)
self.ptr_C_host.append(int(temp_argument.ptr_C))
ldc_host.append(temp_argument.ldc)
self.ptr_D_host.append(int(temp_argument.ptr_D))
ldd_host.append(temp_argument.ldd)
# Get number of tiles
grid = self.operation.rt_module.get_grid_shape(
self.operation.rt_module.get_tiled_shape(
temp_argument.problem_size.ctype,
self.threadblock_shape.ctype,
temp_argument.batch_count
)
)
self.total_tiles += grid.x * grid.y * grid.z
self.problem_size_buffer = todevice(problem_size_host, np.int32)
self.ptr_A_buffer = todevice(self.ptr_A_host, np.int64)
self.ptr_B_buffer = todevice(self.ptr_B_host, np.int64)
self.ptr_C_buffer = todevice(self.ptr_C_host, np.int64)
self.ptr_D_buffer = todevice(self.ptr_D_host, np.int64)
self.lda_buffer = todevice(lda_host, np.int64)
self.ldb_buffer = todevice(ldb_host, np.int64)
self.ldc_buffer = todevice(ldc_host, np.int64)
self.ldd_buffer = todevice(ldd_host, np.int64)
if "output_op" in kwargs.keys():
self.alpha = kwargs["output_op"].alpha
self.beta = kwargs["output_op"].beta
else:
self.alpha = 1.0
self.beta = 0.0
if "output_op" in kwargs.keys():
self.output_op = kwargs["output_op"]
else:
self.output_op = self.operation.epilogue_type(1.0, 0.0)
# Get host problem size
self.host_problem_size_ptr = np.array(problem_size_host, dtype=np.int32).__array_interface__["data"][0]
self.arguments = self.get_arguments()
self.initialize()
def get_arguments(self):
return self.operation.argument_type(
self.problem_size_buffer.ptr,
self.problem_count,
self.total_tiles,
self.output_op,
self.ptr_A_buffer.ptr,
self.ptr_B_buffer.ptr,
self.ptr_C_buffer.ptr,
self.ptr_D_buffer.ptr,
self.lda_buffer.ptr,
self.ldb_buffer.ptr,
self.ldc_buffer.ptr,
self.ldd_buffer.ptr,
ctypes.c_void_p(int(self.host_problem_size_ptr)),
)
def initialize(self):
# Get launch configuration
launch_config = self.operation.rt_module.plan(self)
# Get the host and device workspace
device_workspace_size = self.operation.rt_module.get_device_workspace_size(self)
if device_workspace_size > 0:
self.workspace_buffer = device_mem_alloc(device_workspace_size)
workspace_ptr = self.workspace_buffer.ptr
err, = cuda.cuMemsetD32(
workspace_ptr, 0, device_workspace_size // 4)
else:
workspace_ptr = None
if self.operation.precompute_mode == SchedulerMode.Host:
device_workspace_ptr = self.operation.rt_module.host_precompute(
self, self.operation.rt_module.get_workspace_size(self),)
else:
device_workspace_ptr = 0
result = self.operation.rt_module.get_args(
ctypes.byref(self.arguments),
self.total_tiles,
ctypes.c_void_p(int(device_workspace_ptr)),
)
host_workspace = bytearray(result.contents)
device_workspace = None
self.host_workspace = host_workspace
self.device_workspace = device_workspace
self.launch_config = launch_config
def sync(self):
err, = cudart.cudaDeviceSynchronize()
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
for arg in self.gemm_arguments:
arg.sync(stream_sync=False)
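# Hedged usage sketch for grouped GEMM: one problem size and one set of
# tensors per group member. `grouped_op` is a hypothetical, already-compiled
# GemmOperationGrouped; tensors may be numpy/torch/cupy arrays or raw device
# pointers, as documented above. Not executed by this module.
def _demo_grouped_arguments_usage(grouped_op, As, Bs, Cs, Ds, sizes):
    problem_sizes = [GemmCoord(m, n, k) for (m, n, k) in sizes]
    args = GemmGroupedArguments(
        operation=grouped_op,
        problem_sizes=problem_sizes,
        A=As, B=Bs, C=Cs, D=Ds,
    )
    grouped_op.run(args)
    args.sync()
    return args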
################################################################################
# Base class for GEMM runtime module
################################################################################
class GemmRTbase(ExecutableOperation):
"""
GemmRTbase manages the CUTLASS runtime components shared by the GEMM runtime modules
"""
KernelTemplate = r"""
extern "C"
__global__ void
${operation_name}(${operation_name}${operation_suffix}::Params params) {
// Dynamic shared memory base pointer
extern __shared__ int SharedStorageBase[];
// Declare pointer to dynamic shared memory.
${operation_name}${operation_suffix}::SharedStorage *shared_storage =
reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase);
${operation_name}${operation_suffix}::invoke(params, *shared_storage);
}
"""
def __init__(self, operation: "GemmOperation"):
super().__init__(operation)
self.operation = operation
threadblock_shape = operation.tile_description.threadblock_shape
self.threadblock_shape = GemmCoord(
threadblock_shape[0], threadblock_shape[1], threadblock_shape[2])
self.threadblock_swizzle = operation.swizzling_functor
# Threads per threadblock
self.threads = operation.tile_description.num_threads
def emit(self):
return self.emitter.emit(self.operation)
def can_implement(self, configuration, arguments):
raise NotImplementedError()
def get_host_workspace_size(self, arguments):
raise NotImplementedError()
def get_device_workspace_size(self, arguments):
return 0
def initialize(self):
err, = cuda.cuFuncSetAttribute(
self.kernel,
attrib=cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
value=self.shared_memory_capacity)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(
f"CUDA error on call to cuFuncSetAttribute: {cuda.cuGetErrorString(err)[1]}"
)
################################################################################
# Runtime module for GEMM Universal
################################################################################
class GemmRTUniversal(GemmRTbase):
"""
GemmRTUniversal manages the CUTLASS runtime components for CUTLASS 2.x universal GEMM kernels
"""
HostTemplate = r"""
extern "C" {
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return int(sizeof(${operation_name}${operation_suffix}::SharedStorage));
}
// Get the params as byte array
char* ${operation_name}_get_params(${operation_name}_base::Arguments* argument, int* workspace){
${operation_name}_base::Params* params;
params = new ${operation_name}_base::Params(*argument,
-1, // SM count. Only used for stream-K
-1 // Occupancy. Only used for stream-K
);
// Semaphore holds the pointer to the workspace in the Params struct
params->semaphore = workspace;
char *bytes = ((char*)(params));
char *output = new char[sizeof(${operation_name}_base::Params)];
for (unsigned int i = 0; i < sizeof(${operation_name}_base::Params); i ++)
output[i] = bytes[i];
return output;
}
cutlass::gemm::GemmCoord ${operation_name}_get_tiled_shape(
cutlass::gemm::GemmCoord problem_size, cutlass::gemm::GemmCoord tile_size, int split_k_slices) {
return ${operation_name}_base::ThreadblockSwizzle::get_tiled_shape(
problem_size, tile_size, split_k_slices);
}
dim3 ${operation_name}_get_grid_shape(cutlass::gemm::GemmCoord tiled_shape) {
return ${operation_name}_base::ThreadblockSwizzle::get_grid_shape(tiled_shape);
}
}
"""
def __init__(self, operation):
super(GemmRTUniversal, self).__init__(operation)
self.extra_funcs = {
"get_tiled_shape": GemmCoord_,
"get_grid_shape": dim3_,
}
self.emitter = EmitGemmUniversalInstance(
"_type", operation.direct_store)
self.argument_type, self.epilogue_type = get_gemm_arguments(operation.epilogue_functor)
self.argtype = [
ctypes.POINTER(self.argument_type),
ctypes.POINTER(GemmCoord_), ctypes.c_int, ctypes.c_void_p
]
def plan(self, arguments):
grid = self.get_tiled_shape(
arguments.problem_size.ctype,
self.threadblock_shape.ctype,
arguments.batch_count
)
gemm_k_size = arguments.problem_size.k
if arguments.gemm_mode in [GemmUniversalMode.Gemm, GemmUniversalMode.GemmSplitKParallel]:
alignk = max(max(128 // DataTypeSize[self.operation.A.element],
128 // DataTypeSize[self.operation.B.element]), 1)
gemm_k_size = (((arguments.problem_size.k + arguments.batch_count - 1) //
arguments.batch_count + alignk - 1) // alignk) * alignk
if gemm_k_size:
grid_z = (arguments.problem_size.k + gemm_k_size - 1) // gemm_k_size
grid = GemmCoord(grid.m, grid.n, grid_z).ctype
arguments.grid_tiled_shape = dim3_(grid.m, grid.n, grid.k)
grid = self.get_grid_shape(grid)
arguments.gemm_k_size = gemm_k_size
return LaunchConfiguration(
[grid.x, grid.y, grid.z],
[self.threads, 1, 1],
self.shared_memory_capacity)
def get_device_workspace_size(self, arguments: GemmArguments):
workspace_bytes = 0
if arguments.gemm_mode == GemmUniversalMode.GemmSplitKParallel:
workspace_bytes = (DataTypeSize[arguments.operation.C.element]
* arguments.batched_stride_D * arguments.grid_tiled_shape.z // 8)
elif (arguments.gemm_mode == GemmUniversalMode.Gemm and
arguments.split_k_slices > 1):
workspace_bytes = 4 * arguments.grid_tiled_shape.x * arguments.grid_tiled_shape.y
return workspace_bytes
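# Illustrative sketch of the split-K bookkeeping done by plan() and
# get_device_workspace_size() above: K is divided across `split_k_slices`,
# rounded up to the A/B access alignment, and serial split-K needs one
# 4-byte semaphore per output tile. Example values are hypothetical.
def _demo_split_k_plan(k=4096, split_k_slices=3, a_bits=16, b_bits=16,
                       tiles_m=8, tiles_n=8):
    alignk = max(max(128 // a_bits, 128 // b_bits), 1)
    gemm_k_size = (((k + split_k_slices - 1) // split_k_slices
                    + alignk - 1) // alignk) * alignk
    grid_z = (k + gemm_k_size - 1) // gemm_k_size
    semaphore_bytes = 4 * tiles_m * tiles_n  # serial split-K workspace
    return gemm_k_size, grid_z, semaphore_bytes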
class GemmRTUniversalStreamK(GemmRTUniversal):
"""
Manages the CUTLASS runtime components for CUTLASS 2.x stream-K kernels
"""
HostTemplate = r"""
extern "C" {
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return int(sizeof(${operation_name}${operation_suffix}::SharedStorage));
}
using GemmType = ${operation_name}_base;
// Get the params as byte array
char* ${operation_name}_get_params(GemmType::Arguments* argument, int* workspace,
int sm_count, int occupancy) {
GemmType::Params* params;
params = new GemmType::Params(*argument, sm_count, occupancy);
params->init_workspace(workspace);
char *bytes = ((char*)(params));
char *output = new char[sizeof(GemmType::Params)];
for (unsigned int i = 0; i < sizeof(GemmType::Params); i ++)
output[i] = bytes[i];
return output;
}
dim3 ${operation_name}_get_grid_shape(GemmType::Arguments* args, int device_sms, int sm_occupancy) {
typename GemmType::Params params(*args, device_sms, sm_occupancy);
return params.get_grid_dims();
}
uint64_t ${operation_name}_get_kernel_workspace_size(GemmType::Arguments* args, int device_sms, int sm_occupancy) {
typename GemmType::Params params(*args, device_sms, sm_occupancy);
return params.get_workspace_size();
}
}
"""
def __init__(self, operation: "GemmOperation"):
super(GemmRTUniversalStreamK, self).__init__(operation)
self.extra_funcs = {
"get_grid_shape": GemmCoord_,
"get_kernel_workspace_size": ctypes.c_uint64,
}
self._occupancy = None
self.argument_type, self.epilogue_type = get_gemm_arguments_streamk(operation.epilogue_functor)
@property
def occupancy(self):
if self._occupancy is None:
err, self._occupancy = cuda.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(
self.kernel, self.threads, self.shared_memory_capacity,
cuda.CUoccupancy_flags.CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(
"CUDA error on call to cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags: "
f"{cuda.cuGetErrorString(err)[1]}")
return self._occupancy
def get_device_workspace_size(self, arguments: GemmArguments2xStreamK, device_sms: int, sm_occupancy: int):
return self.get_kernel_workspace_size(ctypes.byref(arguments.get_arguments()), device_sms, sm_occupancy)
################################################################################
# Runtime module for GEMM Universal within CUTLASS 3
################################################################################
class GemmRTUniversal3x(GemmRTUniversal):
"""
Manages the CUTLASS runtime components for CUTLASS 3.x kernels
"""
KernelTemplate = r"""
using Operator = ${operation_name}${operation_suffix};
extern "C"
__global__ __launch_bounds__(Operator::MaxThreadsPerBlock, Operator::MinBlocksPerMultiprocessor)
void ${operation_name}(__grid_constant__ typename Operator::Params const params) {
// Dynamic shared memory base pointer
extern __shared__ char smem[];
// Construct the kernel operator and invoke it with the params and shared memory.
Operator op;
op(params, smem);
}
"""
HostTemplate = r"""
extern "C" {
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return ${operation_name}${operation_suffix}::SharedStorageSize;
}
using GemmType = ${operation_name}_base;
bool ${operation_name}_uses_default_epilogue() {
return std::is_same_v<GemmType::CollectiveEpilogue::DispatchPolicy, cutlass::gemm::EpilogueDefault>;
}
// Get the workspace size
uint64_t ${operation_name}_get_kernel_workspace_size(GemmType::Arguments* argument) {
return GemmType::get_workspace_size(*argument);
}
// Get the params as byte array
char* ${operation_name}_get_params(GemmType::Arguments* argument, int* workspace){
GemmType::Params params = GemmType::to_underlying_arguments(*argument, workspace);
char *bytes = ((char*)(&params));
char *output = new char[sizeof(GemmType::Params)];
for (unsigned int i = 0; i < sizeof(GemmType::Params); i ++)
output[i] = bytes[i];
return output;
}
// Get the total number of blocks for a persistent kernel
uint64_t ${operation_name}_get_persistent_tiled_blk_shape_mnl(GemmType::ProblemShape problem) {
auto problem_shape_MNKL = append<4>(problem, Int<1>{});
auto [problem_blocks_m, problem_blocks_n, problem_blocks_l] =
cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90::get_tiled_cta_shape_mnl(
problem_shape_MNKL, GemmType::TileShape{}, GemmType::DispatchPolicy::ClusterShape{});
return problem_blocks_m * problem_blocks_n * problem_blocks_l;
}
// Get the grid shape
dim3 ${operation_name}_get_grid_shape(GemmType::Arguments* args, int* workspace) {
auto tmp_params = GemmType::to_underlying_arguments(*args, workspace);
return GemmType::get_grid_shape(tmp_params);
}
// Get the block shape
dim3 ${operation_name}_get_block_shape() {
return GemmType::get_block_shape();
}
}
"""
def __init__(self, operation):
super(GemmRTUniversal3x, self).__init__(operation)
self.extra_funcs = {
"get_grid_shape": dim3_,
"get_block_shape": dim3_,
"get_persistent_tiled_blk_shape_mnl": ctypes.c_uint64,
"get_kernel_workspace_size": ctypes.c_uint64,
"uses_default_epilogue": ctypes.c_bool,
}
self.emitter = EmitGemmUniversalInstance3x("_type")
def get_device_workspace_size(self, arguments: GemmArguments3x):
return self.get_kernel_workspace_size(ctypes.byref(arguments.get_arguments()))
class EmitGemmUniversalInstance3x:
"""Responsible for emitting a CUTLASS 3 template definition"""
def __init__(self, operation_suffix=""):
self.operation_suffix = operation_suffix
self.includes = [
"cutlass/cutlass.h",
"cute/tensor.hpp",
"cute/atom/mma_atom.hpp",
"cutlass/numeric_types.h",
"cutlass/gemm/collective/collective_builder.hpp",
"cutlass/gemm/kernel/sm90_tile_scheduler.hpp",
"cutlass/gemm/kernel/gemm_universal.hpp",
"cutlass/epilogue/collective/collective_builder.hpp",
"cutlass/epilogue/collective/default_epilogue.hpp",
"cutlass/epilogue/thread/linear_combination.h"
]
self.gemm_template_kernel = """
using namespace cute;
using CollectiveEpilogue =
typename cutlass::epilogue::collective::CollectiveBuilder<
${arch}, ${opcode_class},
cute::Shape<cute::_${threadblock_shape_m}, cute::_${threadblock_shape_n}, cute::_${threadblock_shape_k}>,
cute::Shape<cute::_${cluster_m},cute::_${cluster_n},cute::_${cluster_k}>,
cutlass::epilogue::collective::EpilogueTileAuto,
${element_accumulator}, ${element_epilogue},
${element_c}, ${layout_c}, ${align_c},
${element_d}, ${layout_d}, ${align_d},
${epilogue_schedule}
>::CollectiveOp;
using CollectiveMainloop =
typename cutlass::gemm::collective::CollectiveBuilder<
${arch}, ${opcode_class},
${element_a}, ${layout_a}, ${align_a},
${element_b}, ${layout_b}, ${align_b},
${element_accumulator},
cute::Shape<cute::_${threadblock_shape_m}, cute::_${threadblock_shape_n}, cute::_${threadblock_shape_k}>,
cute::Shape<cute::_${cluster_m},cute::_${cluster_n},cute::_${cluster_k}>,
${stage_count_type},
${kernel_schedule}
>::CollectiveOp;
// Gemm operator ${operation_name}
using ${operation_name}_base = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
${tile_scheduler}
>;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_kernel_visitor = """
using namespace cute;
${callback_decl}
using CollectiveEpilogue =
typename cutlass::epilogue::collective::CollectiveBuilder<
${arch}, ${opcode_class},
cute::Shape<cute::_${threadblock_shape_m}, cute::_${threadblock_shape_n}, cute::_${threadblock_shape_k}>,
cute::Shape<cute::_${cluster_m},cute::_${cluster_n},cute::_${cluster_k}>,
cutlass::epilogue::collective::EpilogueTileAuto,
${element_accumulator}, ${element_epilogue},
ElementC, StrideC, ${align_c},
ElementD, StrideD, ${align_d},
${epilogue_schedule},
${callback_name}
>::CollectiveOp;
using CollectiveMainloop =
typename cutlass::gemm::collective::CollectiveBuilder<
${arch}, ${opcode_class},
${element_a}, ${layout_a}, ${align_a},
${element_b}, ${layout_b}, ${align_b},
${element_accumulator},
cute::Shape<cute::_${threadblock_shape_m}, cute::_${threadblock_shape_n}, cute::_${threadblock_shape_k}>,
cute::Shape<cute::_${cluster_m},cute::_${cluster_n},cute::_${cluster_k}>,
${stage_count_type},
${kernel_schedule}
>::CollectiveOp;
// Gemm operator ${operation_name}
using ${operation_name}_base = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
${tile_scheduler}
>;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_device = self.gemm_template_kernel + """
// Define device-level operator
using DeviceKernel = cutlass::gemm::device::GemmUniversalAdapter<${operation_name}${operation_suffix}>;
"""
def emit(self, operation):
# Support built-in epilogue functors or user-defined functions
if operation.tile_description.stages is None or operation.tile_description.stages == 0:
stage_count_type = "cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>"
else:
stage_count_type = "_" + str(operation.tile_description.stages)
if operation.emission_type == EmissionType.Kernel:
gemm_template = self.gemm_template_kernel
else:
gemm_template = self.gemm_template_device
kschedule = KernelScheduleType.ScheduleAuto
eschedule = EpilogueScheduleType.ScheduleAuto
tschedule = TileSchedulerType.Default
if operation.tile_description.kernel_schedule is not None:
kschedule = operation.tile_description.kernel_schedule
if operation.tile_description.epilogue_schedule is not None:
eschedule = operation.tile_description.epilogue_schedule
if operation.tile_description.tile_scheduler is not None:
tschedule = operation.tile_description.tile_scheduler
values = {
"operation_name": operation.procedural_name(),
"operation_suffix": self.operation_suffix,
"element_a": DataTypeTag[operation.A.element],
"layout_a": LayoutTag[operation.A.layout],
"element_b": DataTypeTag[operation.B.element],
"layout_b": LayoutTag[operation.B.layout],
"element_c": DataTypeTag[operation.C.element],
"layout_c": LayoutTag[operation.C.layout],
"element_d": DataTypeTag[operation.epilogue_functor.element_output],
"layout_d": LayoutTag[operation.C.layout],
"element_accumulator": DataTypeTag[operation.accumulator_type()],
"element_epilogue": DataTypeTag[operation.epilogue_functor.element_epilogue],
"opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
"arch": "cutlass::arch::Sm%d" % operation.arch,
"threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]),
"threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]),
"threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]),
"cluster_m": str(operation.tile_description.cluster_shape[0]),
"cluster_n": str(operation.tile_description.cluster_shape[1]),
"cluster_k": str(operation.tile_description.cluster_shape[2]),
"align_a": str(operation.A.alignment),
"align_b": str(operation.B.alignment),
"align_c": str(operation.C.alignment),
"align_d": str(operation.C.alignment),
"stage_count_type": stage_count_type,
"kernel_schedule": KernelScheduleTag[kschedule],
"epilogue_schedule": EpilogueScheduleTag[eschedule],
"tile_scheduler": TileSchedulerTag[tschedule]
}
if hasattr(operation.epilogue_functor, "visitor"):
callback_name, callback_decl = operation.epilogue_functor.emit(operation)
values["callback_name"] = callback_name
values["callback_decl"] = callback_decl
return SubstituteTemplate(self.gemm_template_kernel_visitor, values)
else:
values["epilogue_functor"] = operation.epilogue_functor.emit()
return SubstituteTemplate(gemm_template, values)
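# Hedged sketch of what the emitters rely on: SubstituteTemplate-style
# replacement of ${key} placeholders in the C++ templates above with the
# string values assembled in emit(). This stand-in uses a simple regex and is
# only meant to illustrate the mechanism; it does not replace SubstituteTemplate.
def _demo_substitute_template(template, values):
    import re
    return re.sub(r"\$\{(\w+)\}",
                  lambda m: values.get(m.group(1), m.group(0)),
                  template)
# e.g. _demo_substitute_template("cutlass::arch::Sm${arch}", {"arch": "90"})
# returns "cutlass::arch::Sm90".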
###################################################################################################
# Runtime module for GEMM Grouped
###################################################################################################
class GemmRTGrouped(GemmRTbase):
"""
GemmRTGrouped manages the CUTLASS runtime components for grouped GEMM kernels
"""
KernelTemplate = r"""
extern "C"
__global__ void
${operation_name}(${operation_name}${operation_suffix}::Params params) {
// Dynamic shared memory base pointer
extern __shared__ int SharedStorageBase[];
// Declare pointer to dynamic shared memory.
${operation_name}${operation_suffix}::SharedStorage *shared_storage =
reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase);
${operation_name}${operation_suffix} op;
op(params, *shared_storage);
}
"""
HostTemplate = r"""
extern "C" {
// precompute scheduling information
char * ${operation_name}_precompute(${operation_name}_base::Arguments const &args, int tile_count, size_t workspace_bytes) {
char* host_workspace = new char[workspace_bytes];
${operation_name}_base::ProblemVisitor::host_precompute(
args.host_problem_sizes,
args.problem_count,
args.threadblock_count,
(void*)host_workspace
);
return host_workspace;
}
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return int(sizeof(${operation_name}${operation_suffix}::SharedStorage));
}
// Get the params as byte array
char* ${operation_name}_get_params(${operation_name}_base::Arguments* argument, int tile_count, void* workspace=nullptr){
${operation_name}_base::Params* params;
params = new ${operation_name}_base::Params(*argument, workspace, tile_count);
char *bytes = ((char*)(params));
char *output = new char[sizeof(${operation_name}_base::Params)];
for (unsigned int i = 0; i < sizeof(${operation_name}_base::Params); i ++)
output[i] = bytes[i];
return output;
}
cutlass::gemm::GemmCoord ${operation_name}_get_tiled_shape(
cutlass::gemm::GemmCoord problem_size, cutlass::gemm::GemmCoord tile_size, int split_k_slices) {
return ${operation_name}_base::ThreadblockSwizzle::get_tiled_shape(
problem_size, tile_size, split_k_slices);
}
dim3 ${operation_name}_get_grid_shape(cutlass::gemm::GemmCoord tiled_shape) {
return ${operation_name}_base::ThreadblockSwizzle::get_grid_shape(tiled_shape);
}
}
"""
def __init__(self, operation: "GemmOperation"):
super(GemmRTGrouped, self).__init__(operation)
self.extra_funcs = {
"precompute": None,
"get_tiled_shape": GemmCoord_,
"get_grid_shape": dim3_,
}
self.emitter = EmitGemmGroupedInstance("_type")
self.argument_type, self.epilogue_type = get_gemm_grouped_arguments(operation.epilogue_functor)
self.argtype = [ctypes.POINTER(self.argument_type), ctypes.c_int, ctypes.c_void_p]
def host_precompute(self, arguments, workspace_bytes):
self.precompute.argtype = [
self.argtype[0], ctypes.c_int, ctypes.c_longlong]
self.precompute.restype = ctypes.POINTER(ctypes.c_byte * workspace_bytes)
problem_info = self.precompute(
ctypes.byref(arguments.arguments),
arguments.total_tiles,
workspace_bytes)
problem_info_array = bytearray(problem_info.contents)
# copy to device memory
return todevice(problem_info_array).ptr
def plan(self, arguments):
return LaunchConfiguration(
[arguments.total_tiles, 1, 1],
[self.threads, 1, 1],
self.shared_memory_capacity,
)
def get_workspace_size(self, arguments):
if self.operation.precompute_mode == SchedulerMode.Device:
return 0
elif self.operation.precompute_mode == SchedulerMode.Host:
total_tiles = arguments.total_tiles
entries_per_block = 1
return 8 * entries_per_block * total_tiles # 8 bytes of precomputed schedule per tile (two int32_t)
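# Illustrative sketch of the tile accounting that drives a grouped GEMM launch:
# GemmGroupedArguments sums one (m-tiles x n-tiles) grid per problem (its
# total_tiles field), and with host-side scheduling each tile costs a small
# fixed number of bytes of precomputed schedule, as in get_workspace_size()
# above. Threadblock shape and problem sizes below are hypothetical.
def _demo_grouped_tile_count(problem_sizes=((128, 256, 64), (64, 64, 64)),
                             tb_m=128, tb_n=128):
    total_tiles = 0
    for m, n, _k in problem_sizes:
        tiles_m = (m + tb_m - 1) // tb_m
        tiles_n = (n + tb_n - 1) // tb_n
        total_tiles += tiles_m * tiles_n
    host_precompute_bytes = 8 * total_tiles
    return total_tiles, host_precompute_bytes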
################################################################################
# Runtime module for GEMM and grouped GEMM
################################################################################
class GemmOperationBase:
"""
CUTLASS GEMM operation
"""
def __init__(
self, gemm_kind, arch, tile_description: TileDescription,
A: TensorDescription, B: TensorDescription, C: TensorDescription,
epilogue_functor, swizzling_functor=SwizzlingFunctor.Identity1,
api=ApiVersion.v2x, emission_type=EmissionType.Kernel, **kwargs):
self.operation_kind: OperationKind = OperationKind.Gemm
self.arch: int = arch
self.tile_description: TileDescription = tile_description
self.gemm_kind: GemmKind = gemm_kind
self.api = api
self.prefix = "3x" if self.api == ApiVersion.v3x else ""
self.emission_type = emission_type
# Optionally swap the TensorDescriptions for operands A and B and transpose their
# layouts. This is needed to mimic the transpose performed by device::GemmUniversal.
# The code below uses deep copy to avoid overwriting the original TensorDescriptions
self.switched = (self.api != ApiVersion.v3x and
self.emission_type == EmissionType.Kernel and
C.layout == LayoutType.ColumnMajor)
self.A, self.B, self.C = GemmOperationBase.get_operands(A, B, C, self.switched)
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
if "direct_store" in kwargs:
self.direct_store = kwargs["direct_store"]
else:
self.direct_store = False
@staticmethod
def get_operands(A: TensorDescription, B: TensorDescription, C: TensorDescription, swap: bool):
"""
Makes copies of A, B, and C, and possibly transposes their order. If ``swap`` is set,
A and B are swapped, and the layout of A, B, and C are transposed.
:param A: description of operand A
:type A: TensorDescription
:param B: description of operand B
:type B: TensorDescription
:param C: description of operand C
:type C: TensorDescription
:return: descriptions of operands A, B, and C
:rtype: tuple[TensorDescription]
"""
if swap:
A_out = copy.deepcopy(B)
B_out = copy.deepcopy(A)
C_out = copy.deepcopy(C)
A_out.layout = transpose_layout(A_out.layout)
B_out.layout = transpose_layout(B_out.layout)
C_out.layout = transpose_layout(C_out.layout)
else:
A_out = copy.deepcopy(A)
B_out = copy.deepcopy(B)
C_out = copy.deepcopy(C)
return A_out, B_out, C_out
def run(self, arguments: GemmArguments) -> cuda.CUresult:
"""
Configure and launch the cuda kernel with input arguments
"""
if self.emission_type == EmissionType.Device:
raise Exception('Running a kernel via PyCUTLASS is only enabled with emission type "Kernel"')
err = self.rt_module.run(
arguments.host_workspace,
arguments.device_workspace,
arguments.launch_config,
arguments.stream
)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
return err
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32,
]
return self.tile_description.math_instruction.math_operation in complex_operators
def is_planar_complex(self):
return self.gemm_kind in (GemmKind.PlanarComplex, GemmKind.PlanarComplexArray)
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
def core_name(self):
"""The basic operation kind is prefixed with a letter indicating the accumulation type."""
inst_shape = ""
inst_operation = ""
intermediate_type = ""
math_operations_map = {
MathOperation.xor_popc: "xor",
}
if (self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp):
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ""
if self.tile_description.math_instruction.instruction_shape is not None:
if self.api == ApiVersion.v3x and self.arch >= 90:
inst_shape = "%dx%dx%d" % tuple(
self.tile_description.math_instruction.instruction_shape)
else:
inst_shape = "%d%d%d" % tuple(
self.tile_description.math_instruction.instruction_shape)
else:
inst_shape = "Default"
inst_shape += math_op_string
if (self.tile_description.math_instruction.element_a != self.A.element and
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator):
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, GemmKindNames[self.gemm_kind])
def extended_name(self):
"""Append data types if they differ from compute type."""
if self.is_complex():
extended_name = "${core_name}"
else:
if (self.C.element != self.tile_description.math_instruction.element_accumulator and
self.A.element != self.tile_description.math_instruction.element_accumulator):
extended_name = "${element_c}_${core_name}_${element_a}"
elif (self.C.element == self.tile_description.math_instruction.element_accumulator and
self.A.element != self.tile_description.math_instruction.element_accumulator):
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
"element_a": DataTypeNames[self.A.element],
"element_c": DataTypeNames[self.C.element],
"core_name": self.core_name(),
})
return extended_name
def extended_name_3x(self):
"""Generates a string representing the MMA atom. Assumes accumulator type is C type."""
extended_name = "{core_name}_{element_a}_{element_b}_{element_acc}_{element_c}_{element_d}".format(
element_a=DataTypeNames[self.A.element],
element_b=DataTypeNames[self.B.element],
element_acc=DataTypeNames[self.accumulator_type()],
element_c=DataTypeNames[self.C.element],
element_d=DataTypeNames[self.epilogue_functor.element_output],
core_name=self.core_name())
return extended_name
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)],
ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)]
)
return "%s%s" % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout])
# Generates a short string representing the ABC layout tags (e.g. ntn or tnn)
def layout_name_3x(self):
if self.is_complex() or self.is_planar_complex():
return "{}{}{}".format(
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)],
ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)],
ShortComplexLayoutNames[(self.C.layout, self.C.complex_transform)])
else:
return "{}{}{}".format(
ShortLayoutTypeNames[self.A.layout],
ShortLayoutTypeNames[self.B.layout],
ShortLayoutTypeNames[self.C.layout])
# Generates a short string representing underlying kernel schedule type
def kernel_schedule_name_3x(self):
if self.tile_description.kernel_schedule is None:
return KernelScheduleSuffixes[KernelScheduleType.ScheduleAuto]
else:
return KernelScheduleSuffixes[self.tile_description.kernel_schedule]
# Generates a short string representing underlying epilogue schedule type
def epilogue_schedule_name_3x(self):
if self.tile_description.epilogue_schedule is None:
return EpilogueScheduleSuffixes[EpilogueScheduleType.ScheduleAuto]
else:
return EpilogueScheduleSuffixes[self.tile_description.epilogue_schedule]
def procedural_name(self):
"""The full procedural name indicates architecture, extended name, tile size, and layout."""
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
if self.api == ApiVersion.v3x and self.arch >= 90:
kernel_name_template = "cutlass{p}_sm{ar}_{op}_{ex}_{tbm}x{tbn}x{tbk}_{cm}x{cn}x{ck}_{l}_{s}_align{al}{k}{e}"
return kernel_name_template.format(
p=self.prefix,
ar=self.arch,
op=opcode_class_name,
ex=self.extended_name_3x(),
tbm=self.tile_description.threadblock_shape[0],
tbn=self.tile_description.threadblock_shape[1],
tbk=self.tile_description.threadblock_shape[2],
cm=self.tile_description.cluster_shape[0],
cn=self.tile_description.cluster_shape[1],
ck=self.tile_description.cluster_shape[2],
l=self.tile_description.stages,
s=self.layout_name_3x(),
al=str(self.A.alignment),
k=self.kernel_schedule_name_3x(),
e=self.epilogue_schedule_name_3x()
)
else:
threadblock = self.tile_description.procedural_name_2x()
return "cutlass{p}_{op}_{ex}_{tb}_{l}_align{a}".format(
p=self.prefix,
op=opcode_class_name,
ex=self.extended_name(),
tb=threadblock,
l=self.layout_name(),
a=str(self.A.alignment)
)
def configuration_name(self):
"""The full procedural name indicates architecture, extended name, tile size, and layout."""
return self.procedural_name()
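# Illustrative sketch of the CUTLASS 3.x kernel naming scheme implemented by
# procedural_name() above. The field values below are hypothetical examples;
# the real method derives them from the operation's tile description and
# operand descriptions.
def _demo_procedural_name_3x():
    template = ("cutlass{p}_sm{ar}_{op}_{ex}_{tbm}x{tbn}x{tbk}"
                "_{cm}x{cn}x{ck}_{l}_{s}_align{al}{k}{e}")
    return template.format(
        p="3x", ar=90, op="tensorop", ex="example_extended_name",
        tbm=128, tbn=128, tbk=64,   # threadblock tile
        cm=2, cn=1, ck=1,           # cluster shape
        l=0,                        # stage count (placeholder)
        s="tnn",                    # A/B/C layout tags
        al=8,                       # alignment of A
        k="", e="")                 # schedule suffixes left empty in this sketch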
class GemmOperationUniversal(GemmOperationBase):
def __init__(self, arch, tile_description: TileDescription, A: TensorDescription, B, C,
epilogue_functor, swizzling_functor=SwizzlingFunctor.Identity1, **kwargs):
api = api_version(arch, tile_description.math_instruction.opcode_class, A.element)
super(GemmOperationUniversal, self).__init__(GemmKind.Universal, arch, tile_description,
A, B, C, epilogue_functor, swizzling_functor,
api=api, **kwargs, )
if api == ApiVersion.v3x:
if swizzling_functor == SwizzlingFunctor.StreamK:
raise Exception("Stream K swizzle functor is currently only supported for CUTLASS 2.x kernels")
self.rt_module = GemmRTUniversal3x(self)
else:
if swizzling_functor == SwizzlingFunctor.StreamK:
self.rt_module = GemmRTUniversalStreamK(self)
else:
self.rt_module = GemmRTUniversal(self)
self.argument_type = self.rt_module.argument_type
self.epilogue_type = self.rt_module.epilogue_type
def device_op(self):
"""
Returns a new GemmOperationUniversal object that is constructed with emission type
``EmissionType.Device``. Since the device-emitted kernel does not require swapping,
any swapping performed by the kernel-emitted operation is reversed.
:return: operation ready for device-level code emission
:rtype: GemmOperationUniversal
"""
A, B, C = GemmOperationBase.get_operands(self.A, self.B, self.C, self.switched)
return GemmOperationUniversal(self.arch, self.tile_description, A, B, C,
self.epilogue_functor, self.swizzling_functor,
emission_type=EmissionType.Device, direct_store=self.direct_store)
class GemmOperationGrouped(GemmOperationBase):
def __init__(self, arch, tile_description: TileDescription, A: TensorDescription, B, C,
epilogue_functor, swizzling_functor=SwizzlingFunctor.Identity1, **kwargs):
super(GemmOperationGrouped, self).__init__(GemmKind.Grouped, arch, tile_description,
A, B, C, epilogue_functor, swizzling_functor, **kwargs)
assert "precompute_mode" in kwargs.keys(), "missing keyword arguement 'precompute_mode'."
self.precompute_mode = kwargs["precompute_mode"]
self.rt_module = GemmRTGrouped(self)
self.argument_type = self.rt_module.argument_type
self.epilogue_type = self.rt_module.epilogue_type
def device_op(self):
"""
Returns a new GemmOperationGrouped object that is constructed with emission type
``EmissionType.Device``. Since the device-emitted kernel does not require swapping,
any swapping performed by the kernel-emitted operation is reversed.
:return: operation ready for device-level code emission
:rtype: GemmOperationGrouped
"""
A, B, C = GemmOperationBase.get_operands(self.A, self.B, self.C, self.switched)
return GemmOperationGrouped(
self.arch, self.tile_description, A, B, C, self.epilogue_functor,
self.swizzling_functor, emission_type=EmissionType.Device,
direct_store=self.direct_store, precompute_mode=self.precompute_mode, )
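# Illustrative sketch (plain tuples rather than TensorDescription objects) of
# the operand swap performed by GemmOperationBase.get_operands: for 2.x
# kernel-level emission with a column-major C, A and B are exchanged and all
# three layouts are transposed, mimicking the transpose done by
# device::GemmUniversal. The element/layout strings below are examples.
def _demo_swap_operands(A=("f16", "RowMajor"), B=("f16", "ColumnMajor"),
                        C=("f32", "ColumnMajor")):
    flip = {"RowMajor": "ColumnMajor", "ColumnMajor": "RowMajor"}
    A_out = (B[0], flip[B[1]])   # A takes B's element with transposed layout
    B_out = (A[0], flip[A[1]])   # B takes A's element with transposed layout
    C_out = (C[0], flip[C[1]])   # C layout is transposed in place
    return A_out, B_out, C_out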
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
class EmitGemmUniversalInstance:
"""Responsible for emitting a CUTLASS template definition"""
def __init__(
self,
operation_suffix="",
direct_store=False
):
self.operation_suffix = operation_suffix
self.direct_store = direct_store
self.includes = [
"cutlass/cutlass.h",
"cutlass/gemm_coord.h",
"cutlass/numeric_types.h",
"cutlass/arch/arch.h",
"cutlass/arch/mma.h",
"cutlass/layout/matrix.h",
"cutlass/gemm/device/gemm.h",
"cutlass/gemm/device/gemm_universal_adapter.h",
"cutlass/gemm/kernel/default_gemm_universal.h",
]
if self.direct_store:
self.includes.append(
"cutlass/epilogue/threadblock/default_epilogue_direct_store.h"
)
self.gemm_template_kernel = """
// Gemm operator ${operation_name}
using ${operation_name}_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${math_operation}
>::GemmKernel;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_device = """
// Gemm operator ${operation_name}
using DeviceKernel =
typename cutlass::gemm::device::GemmUniversal<
// Data type and layout of operand A
${element_a}, ${layout_a},
// Data type and layout of operand B
${element_b}, ${layout_b},
// Data type and layout of operand C
${element_c}, ${layout_c},
// Data type of accumulator
${element_accumulator},
// Class of operation
${opcode_class},
// Compute capability of the target kernel
${arch},
// Threadblock tile shape
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
// Warp tile shape
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
// Instruction shape
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
// Epilogue functor
${epilogue_functor},
// Swizzling function
${swizzling_functor},
// Number of pipeline stages
${stages},
// Alignment of operands A and B
${align_a}, ${align_b},
// Type of math operation
${math_operation},
// Complex transform types of operands A and B
${transform_a}, ${transform_b}
>;
"""
self.gemm_template_direct_store = """
// Gemm operator ${operation_name}
using ${operation_name}_default =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${math_operation}
>::GemmKernel;
using ${operation_name}_base =
cutlass::gemm::kernel::GemmUniversal<
${operation_name}_default::Mma,
cutlass::epilogue::threadblock::DefaultEpilogueDirectStore<
${operation_name}_default::Epilogue
>::Epilogue,
${operation_name}_default::ThreadblockSwizzle
>;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_kernel_visitor = """
using OutputTileThreadMap = cutlass::epilogue::threadblock::OutputTileThreadLayout<
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
${element_c},
${align_c},
${epilogue_stages} /* epilogue stages */
>;
${callback_decl}
// Gemm operator ${operation_name}
using ${operation_name}_base =
typename cutlass::gemm::kernel::DefaultGemmWithVisitor<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c}, ${align_c},
${element_accumulator},
${element_epilogue},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${callback_name},
${swizzling_functor},
${stages},
${math_operation},
${epilogue_stages} /* epilogue stages */
>::GemmKernel;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<
cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>
>("${operation_name}"));
${compile_guard_end}
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
instance_layout_A, instance_layout_B, instance_layout_C = \
(operation.A.layout, operation.B.layout, operation.C.layout)
if operation.emission_type == EmissionType.Kernel:
if self.direct_store:
gemm_template = self.gemm_template_direct_store
else:
gemm_template = self.gemm_template_kernel
else:
gemm_template = self.gemm_template_device
values = {
"operation_name": operation.procedural_name(),
"operation_suffix": self.operation_suffix,
"element_a": DataTypeTag[operation.A.element],
"layout_a": LayoutTag[instance_layout_A],
"element_b": DataTypeTag[operation.B.element],
"layout_b": LayoutTag[instance_layout_B],
"element_c": DataTypeTag[operation.C.element],
"layout_c": LayoutTag[instance_layout_C],
"element_accumulator": DataTypeTag[operation.accumulator_type()],
"opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
"arch": "cutlass::arch::Sm%d" % operation.arch,
"threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]),
"threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]),
"threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]),
"warp_shape_m": str(warp_shape[0]),
"warp_shape_n": str(warp_shape[1]),
"warp_shape_k": str(warp_shape[2]),
"instruction_shape_m": str(operation.tile_description.math_instruction.instruction_shape[0]),
"instruction_shape_n": str(operation.tile_description.math_instruction.instruction_shape[1]),
"instruction_shape_k": str(operation.tile_description.math_instruction.instruction_shape[2]),
"swizzling_functor": SwizzlingFunctorTag[operation.swizzling_functor],
"stages": str(operation.tile_description.stages),
"align_a": str(operation.A.alignment),
"align_b": str(operation.B.alignment),
"transform_a": ComplexTransformTag[operation.A.complex_transform],
"transform_b": ComplexTransformTag[operation.B.complex_transform],
"math_operation": MathOperationTag[operation.tile_description.math_instruction.math_operation],
}
if hasattr(operation.epilogue_functor, "visitor"):
self.includes += [
"cutlass/epilogue/threadblock/fusion/visitors.hpp",
"cutlass/gemm/kernel/default_gemm_universal_with_visitor.h"
]
callback_name, callback_decl = operation.epilogue_functor.emit(operation)
values["callback_name"] = callback_name
values["callback_decl"] = callback_decl
values["align_c"] = str(operation.C.alignment)
values["element_epilogue"] = DataTypeTag[operation.epilogue_functor.element_epilogue]
if hasattr(operation.epilogue_functor, "epilogue_stages"):
epilogue_stages = operation.epilogue_functor.epilogue_stages
else:
epilogue_stages = 1
values["epilogue_stages"] = str(epilogue_stages)
return SubstituteTemplate(self.gemm_template_kernel_visitor, values)
else:
values["epilogue_functor"] = operation.epilogue_functor.emit()
return SubstituteTemplate(gemm_template, values)
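# Note (editorial sketch): the templates above are plain strings with ``${name}`` placeholders
# that ``emit()`` fills from the ``values`` dictionary via the ``SubstituteTemplate`` helper
# (its real definition is not shown in this excerpt). A minimal, hypothetical stand-in with the
# same observable behavior could look like:
#
#   import re
#
#   def substitute_template_sketch(template: str, values: dict) -> str:
#       # Replace every ${key} found in `values`; leave unknown placeholders untouched.
#       return re.sub(r"\$\{(\w+)\}", lambda m: values.get(m.group(1), m.group(0)), template)
#
#   substitute_template_sketch(
#       "cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>",
#       {"threadblock_shape_m": "128", "threadblock_shape_n": "128", "threadblock_shape_k": "32"})
#   # -> 'cutlass::gemm::GemmShape<128, 128, 32>'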
class EmitGemmGroupedInstance:
"""Responsible for emitting a CUTLASS template definition"""
def __init__(self, operation_suffix=""):
self.operation_suffix = operation_suffix
self.includes = [
"cutlass/cutlass.h",
"cutlass/numeric_types.h",
"cutlass/arch/arch.h",
"cutlass/arch/mma.h",
"cutlass/layout/matrix.h",
"cutlass/gemm/kernel/gemm_grouped.h",
"cutlass/gemm/kernel/default_gemm_grouped.h",
]
self.gemm_template_kernel = """
// Gemm operator ${operation_name}
using ${operation_name}_base =
typename cutlass::gemm::kernel::DefaultGemmGrouped<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${precompute_mode},
${math_operation}
>::GemmKernel;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_device = (
self.gemm_template_kernel
+ """
using DeviceKernel = cutlass::gemm::device::GemmGrouped<${operation_name}_base>;
"""
)
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<
cutlass::gemm::device::GemmGrouped<${operation_name}>
>("${operation_name}"));
${compile_guard_end}
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
instance_layout_A, instance_layout_B, instance_layout_C = \
(operation.A.layout, operation.B.layout, operation.C.layout)
# Support built-in epilogue functors or user-defined functions
epilogue_functor = operation.epilogue_functor.emit()
values = {
"operation_name": operation.procedural_name(),
"operation_suffix": self.operation_suffix,
"element_a": DataTypeTag[operation.A.element],
"layout_a": LayoutTag[instance_layout_A],
"element_b": DataTypeTag[operation.B.element],
"layout_b": LayoutTag[instance_layout_B],
"element_c": DataTypeTag[operation.C.element],
"layout_c": LayoutTag[instance_layout_C],
"element_accumulator": DataTypeTag[operation.accumulator_type()],
"opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
"arch": "cutlass::arch::Sm%d" % operation.arch,
"threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]),
"threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]),
"threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]),
"warp_shape_m": str(warp_shape[0]),
"warp_shape_n": str(warp_shape[1]),
"warp_shape_k": str(warp_shape[2]),
"instruction_shape_m": str(operation.tile_description.math_instruction.instruction_shape[0]),
"instruction_shape_n": str(operation.tile_description.math_instruction.instruction_shape[1]),
"instruction_shape_k": str(operation.tile_description.math_instruction.instruction_shape[2]),
"epilogue_functor": epilogue_functor,
"swizzling_functor": SwizzlingFunctorTag[operation.swizzling_functor],
"stages": str(operation.tile_description.stages),
"align_a": str(operation.A.alignment),
"align_b": str(operation.B.alignment),
"transform_a": ComplexTransformTag[operation.A.complex_transform],
"transform_b": ComplexTransformTag[operation.B.complex_transform],
"precompute_mode": SchedulerModeTag[operation.precompute_mode],
"math_operation": MathOperationTag[operation.tile_description.math_instruction.math_operation],
}
if operation.emission_type == EmissionType.Kernel:
gemm_template = self.gemm_template_kernel
else:
gemm_template = self.gemm_template_device
return SubstituteTemplate(gemm_template, values)
# (end of python/cutlass/backend/gemm_operation.py)
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Ease-of-use interface for constructing, compiling, and running CONVs
The ``Conv2d`` interface is meant to allow one to easily instantiate, compile, and run
CONV2D operations in CUTLASS via Python, without specifying many configuration parameters.
Under the hood, the interface will select sensible default parameters for the many template
parameters for CUTLASS CONVs.
Note: optimal performance is not to be expected from this interface. To achieve optimal
performance, one should specify and tune each configuration parameter.
The simplest example of using this interface is the following:
.. highlight:: python
.. code-block:: python
# A, B, C, and D are torch/numpy/cupy tensor objects
    plan = cutlass.op.Conv2d(kind="fprop", A=A, B=B, C=C, D=D)
plan.run(stride=(1, 1), padding=(0, 0), dilation=(1, 1))
One can also use the interface by specifying data types of operands at construction
and using different tensor objects with these data types at runtime:
.. highlight:: python
.. code-block:: python
# The following is shorthand for:
# cutlass.op.Conv2d(kind="fprop",
# element_A=torch.float32, element_B=torch.float32,
# element_C=torch.float32, element_D=torch.float32,
# element_accumulator=torch.float32)
plan = cutlass.op.Conv2d(kind="fprop", element=torch.float32)
    A0 = torch.rand((32, 28, 28, 64), dtype=torch.float32, device='cuda')    # NHWC input
    B0 = torch.rand((128, 3, 3, 64), dtype=torch.float32, device='cuda')     # KRSC weight
    C0 = torch.zeros((32, 26, 26, 128), dtype=torch.float32, device='cuda')  # NPQK output
    D0 = torch.zeros((32, 26, 26, 128), dtype=torch.float32, device='cuda')
    plan.run(A0, B0, C0, D0, stride=(1, 1), padding=(0, 0), dilation=(1, 1))
    A1 = torch.rand((16, 32, 32, 64), dtype=torch.float32, device='cuda')
    B1 = torch.rand((128, 3, 3, 64), dtype=torch.float32, device='cuda')
    C1 = torch.zeros((16, 30, 30, 128), dtype=torch.float32, device='cuda')
    D1 = torch.zeros((16, 30, 30, 128), dtype=torch.float32, device='cuda')
    plan.run(A1, B1, C1, D1, stride=(1, 1), padding=(0, 0), dilation=(1, 1))
The interface additionally enables one to decouple the compilation of the underlying CUTLASS
kernel from its execution:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Conv2d(kind="fprop", element=np.float32)
# Do other work...
plan.run(A0, B0, C0, D0, stride=(1, 1), padding=(0, 0), dilation=(1, 1))
# Do other work...
plan.run(A1, B1, C1, D1, stride=(1, 1), padding=(0, 0), dilation=(1, 1))
Elementwise activation functions are easily fused to the GEMM via the interface:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Conv2d(kind="fprop", element=np.float32)
plan.activation = cutlass.epilogue.relu
Operations can also be run asynchronously:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Conv2d(kind="fprop", element=np.float32)
args = plan.run()
# Do other work...
args.sync()
"""
from cuda import cuda
from cutlass_library import (
ConvKind,
ConvMode,
DataTypeSize,
IteratorAlgorithm,
OperationKind,
SplitKMode,
StrideSupport,
)
import cutlass
from cutlass import epilogue
from cutlass.backend import compiler
from cutlass.backend.conv2d_operation import Conv2dArguments, Conv2dOperation
from cutlass.backend.reduction_operation import ReductionOperation, ReductionArguments
from cutlass.backend.library import TensorDescription, TileDescription
from cutlass.op.op import OperationBase
from cutlass.shape import Conv2DProblemSize, MatrixCoord
from cutlass.utils import check, datatypes
class Conv2d(OperationBase):
"""
Constructs a ``Conv2d`` object.
    The convolution kind (fprop, wgrad, dgrad), the data types of operands A, B, and C,
    along with the data type of output D and that used for accumulation, are bound to the ``Conv2d``
object throughout its lifetime -- these are not to be changed after a ``Conv2d`` has been constructed.
The constructor has optional parameters for flexibly setting these parameters. The following
constructors are equivalent:
.. highlight:: python
.. code-block:: python
# Use F32 for A, B, C, D, and accumulation in fprop
# Use the generic ``element`` parameter to concisely set all data types for operands to the same values.
Conv2d(kind="fprop", element=cutlass.DataType.f32)
# Explicitly specify the data types to use for A, B, C, and D.
Conv2d(kind="fprop", element_A=cutlass.DataType.f32, element_B=cutlass.DataType.f32,
element_C=cutlass.DataType.f32, element_D=cutlass.DataType.f32)
# Set the data types and elements from existing tensors. Note that one can use different tensors when
        # executing the convolution via the ``run()`` method than passed in here (though those passed in to ``run()`` must
# have the same data type as those passed in here).
# A, B, C, and D are torch.Tensor objects of type torch.float32 under the channel-last layout
Conv2d(kind="fprop", A=A, B=B, C=C, D=D)
# Explicitly specify the data type for only some of A, B, C, and D. Unspecified data types will inherit
# those passed in via the generic ``element``
Conv2d(kind="fprop", element_A=cutlass.DataType.f32, element_accumulator=cutlass.DataType.f32,
element=cutlass.DataType.f32)
The order of precedence for the setting of the data type for a given operand/output is as follows:
1) If the tensor type is specified (e.g., ``A``), use the data type inferred from this tensor
2) Otherwise, if the data type (e.g., ``element_A``) is specified, use those
3) Otherwise, use the generic values (e.g., ``element``)
:param kind: the convolution kind (i.e. fprop, wgrad, and dgrad)
:type kind: str
:param A: tensor representing data type of operand A
:param B: tensor representing data type of operand B
:param C: tensor representing data type of operand C
:param D: tensor representing data type of operand D
    :param alpha: scalar parameter alpha from GEMM computation that scales the product of operands A and B
:param beta: scalar parameter beta from GEMM operation that scales operand C
:param element: generic data type to be used for operands A, B, C, D, as well as the accumulation data type
:type element: cutlass.DataType
:param element_A: data type to be used for operand A
:type element_A: cutlass.DataType
:param element_B: data type to be used for operand B
:type element_B: cutlass.DataType
:param element_C: data type to be used for operand C
:type element_C: cutlass.DataType
:param element_D: data type to be used for operand D
:type element_D: cutlass.DataType
:param element_accumulator: data type to be used in accumulation of the product of operands A and B
:type element_accumulator: cutlass.DataType
:param cc: compute capability of device for which kernels should be compiled. For example, if running on H100, this should be set to 90
:type cc: int
:param kernel_cc: compute capability of kernels to generate. For example, if running on SM90, but desiring to use a CUTLASS 2.x-style Ampere kernel, this should be set to 80
:type kernel_cc: int
"""
def __init__(
self, kind="fprop",
A=None, B=None, C=None, D=None, alpha=1.0, beta=0.0,
element=None,
element_A=None, element_B=None, element_C=None, element_D=None,
element_accumulator=None,
cc: int = None, kernel_cc: int = None
):
super().__init__(cc=cc, kernel_cc=kernel_cc, operation_kind=OperationKind.Conv2d)
# Verify the kernel cc
if self.current_cc == 90:
# The Conv2d kernel on Hopper (SM90) is currently unsupported
# Revert to use SM80-tagged kernels
cutlass.logger.warning("Reverting to using SM80-tagged kernel. Opclass may change.")
self.specified_kernel_cc = 80
self._reset_options(80)
# The arch is used in testing
self.arch = self.current_cc
self.name = "conv2d" + kind
# The convolution kind. (concept: cutlass_library.library.ConvKind)
self.conv_kind = datatypes.getattr_enum(ConvKind, kind)
# The element types (concept: cutlass library types) of A, B, C, and D
elements = []
layouts = []
# Complete the data types based on user-provided arguments
for elt, tens, name in zip([element_A, element_B, element_C, element_D],
[A, B, C, D],
["A", "B", "C", "D"]):
if elt is not None and tens is not None:
raise Exception(f'Must not specify both element_{name} and tensor {name}')
if elt is None and tens is None and element is None:
raise Exception(f'Must specify one of element_{name}, tensor {name}, or generic element.')
elt_to_set = None
lay_to_set = None
if tens is not None:
elt_to_set, _ = datatypes.get_datatype_and_layout(tens)
else:
elt_to_set = elt if elt is not None else element
assert elt_to_set is not None
# Currently we only support layout TensorNHWC
lay_to_set = cutlass.LayoutType.TensorNHWC
elements.append(datatypes.library_type(elt_to_set))
layouts.append(lay_to_set)
self._element_a, self._element_b, self._element_c, self._element_d = elements
self._layout_a, self._layout_b, self._layout_c, self._layout_d = layouts
self.A, self.B, self.C, self.D, self.alpha, self.beta = A, B, C, D, alpha, beta
if element_accumulator is None:
self._element_accumulator = self._element_c
else:
self._element_accumulator = datatypes.library_type(element_accumulator)
# Default inputs if none is supplied in run()
self.A = A
self.B = B
self.C = C
self.D = D
self.alpha = alpha
self.beta = beta
# We only specify the stride of the swizzling functor here
# The actual swizzling functor is determined in run based on conv_kind and stride
self._swizzling_stride = 1
# Arguments that will be set to default value in _reset_operations
# The default tile_description and op_class are fetched from manifest of cutlass library
self._tile_description = None
self.op_class = None
# The default identity epilogue will be created
self.epilogue_functor = None
self._reset_operations()
# Arguments that will be determined online based on arguments of "run"
# based on stride, input/output channels, alignment, and conv_kind
self._iterator_algorithm = None
self._stride_support = None
def _reset_operations(self, reset_epilogue: bool = True):
# Set the default op class
datatype_comb = (self._element_a, self._element_b, self._element_accumulator)
layout_comb = (self._layout_a, self._layout_b)
self.possible_op_classes = self.options.supporting_opclasses(
self._element_a, self._element_b, self._element_accumulator,
self._layout_a, self._layout_b, self._math_operation
)
if cutlass.OpcodeClass.TensorOp in self.possible_op_classes:
self.opclass = cutlass.OpcodeClass.TensorOp
elif cutlass.OpcodeClass.Simt in self.possible_op_classes:
self.opclass = cutlass.OpcodeClass.Simt
else:
if self._math_operation is not None:
math_op_str = f' and math operation {self._math_operation}'
else:
math_op_str = ''
raise Exception(f'No kernel configuration found for supported data type and layout '
f'combination {datatype_comb}x{layout_comb}{math_op_str}')
if reset_epilogue:
self._reset_epilogue_functor_activation(epilogue.identity)
self.alignment_pref_A = min(
128 // DataTypeSize[self._element_a], max(self.possible_operations.alignments("A")))
self.alignment_pref_B = min(
128 // DataTypeSize[self._element_b], max(self.possible_operations.alignments("B")))
self.alignment_pref_C = min(
128 // DataTypeSize[self._element_c], max(self.possible_operations.alignments("C")))
#
# Tile description Related
#
@property
def tile_description(self) -> TileDescription:
"""
Returns the tile description
"""
return self._tile_description
@tile_description.setter
def tile_description(
self, td=None):
"""
Set the tile description
:param td: tile description
:type td: cutlass.backend.TileDescription, or a dict with keys
{
"threadblock_shape": [int, int, int],
"warp_count": [int, int, int],
"stages": int,
"instruction_shape": [int, int, int] (optional),
"cluster_shape": [int, int, int] (optional)
}
"""
if td is None:
return
if isinstance(td, dict):
if self._tile_description is None:
op = self.possible_operations.default_operation(self._math_operation)
self._tile_description = datatypes.td_from_profiler_op(op)
if "cluster_shape" in td.keys():
if td["cluster_shape"] != [1, 1, 1]:
cutlass.logger.warning("Conv2d currently only support 'cluster_shape'=[1, 1, 1]'.")
td["cluster_shape"] = [1, 1, 1]
td = self._tile_description.clone_and_update(td)
valid, msg = self._valid_tile_description(td)
if valid:
self._tile_description = td
else:
raise Exception(msg)
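    # Usage sketch (illustrative values, not a canonical configuration): with the dict form
    # documented above, a caller could override the threadblock tile and stage count while
    # keeping the remaining fields of the default operation:
    #
    #   plan = cutlass.op.Conv2d(kind="fprop", element=cutlass.DataType.f16)
    #   plan.tile_description = {
    #       "threadblock_shape": [128, 128, 64],
    #       "warp_count": [2, 2, 1],
    #       "stages": 3,
    #   }
    #
    # Valid combinations depend on the target architecture and are checked by
    # _valid_tile_description() below.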
def _valid_tile_description(self, td: TileDescription) -> tuple:
"""
Checks whether the provided tile description is valid for the given compute capability. At present,
this checks the following:
- Does the tile description use a number of stages supported by the compute capability in question?
- Does the tile size requested fit within shared memory?
- Are cluster dimensions outside the valid range requested for a given architecture (e.g.,
more non-unit cluster dimensions for pre-SM90 architectures)?
- Is the kernel schedule being used supported on the architecture in question?
:param td: tile description to validate
:type td: cutlass.backend.TileDescription
:return: tuple in which the first element is a bool indicating that the tile description is valid
and the second element is a string providing an optional error message.
:rtype: tuple
"""
valid, msg = check.valid_stage_count(self.cc, self.current_cc, td)
if not valid:
return (valid, msg)
valid, msg = check.valid_cluster_shape(self.current_cc, td.cluster_shape)
if not valid:
return (valid, msg)
return valid, msg
def tile_descriptions(self) -> list:
"""
Returns a list of valid tile descriptions for the operations
:returns: list of valid tile descriptions for the operations
:rtype: list
"""
descriptions = []
description_str = []
for op in self.possible_operations.all_operations:
td = datatypes.td_from_profiler_op(op)
if self._math_operation is not None:
if td.math_instruction.math_operation != self._math_operation:
continue
if str(td) not in description_str:
description_str.append(str(td))
descriptions.append(td)
return descriptions
#
# Swizzling functor Related
#
@property
def swizzling_stride(self):
"""
Returns the stride of swizzling currently being used by the Conv2d
        :return: swizzling stride
"""
return self._swizzling_stride
@swizzling_stride.setter
def swizzling_stride(self, stride: int):
"""
        Sets the stride used by the swizzling functor
"""
if not isinstance(stride, int):
raise Exception(f"Expect integer (1, 2, 4, 8), got {stride}")
self._swizzling_stride = stride
def _propose_swizzling_functor(self, stride):
"""
Automatically propose the swizzling functor based on the stride
"""
if self.conv_kind == ConvKind.Dgrad:
if stride[0] != 1 or stride[1] != 1:
return getattr(cutlass.swizzle, f"StridedDgradIdentitySwizzle{self._swizzling_stride}")
return getattr(cutlass.swizzle, f"IdentitySwizzle{self._swizzling_stride}")
#
# Iterator Algorithm Related
#
@property
def iterator_algorithm(self) -> IteratorAlgorithm:
"""
Returns the iterator algorithm
"""
return self._iterator_algorithm
@iterator_algorithm.setter
def iterator_algorithm(self, alg: str):
"""
Sets the iterator algorithm
:param alg: The iterator algorithm
        :type alg: str, options: "analytic", "optimized", "few_channels", and "fixed_channels"
"""
iterator_alg = datatypes.getattr_enum(IteratorAlgorithm, alg)
# Check if the iterator algorithm is valid
if iterator_alg in [IteratorAlgorithm.FewChannels, IteratorAlgorithm.FixedChannels] and self.conv_kind != ConvKind.Fprop:
raise Exception(f"{self.conv_kind} does not support iterator algorithm {alg}.")
self._iterator_algorithm = iterator_alg
def _propose_iterator_algorithm(self, problem_size, alignment_a, alignment_b) -> IteratorAlgorithm:
"""
Propose a valid iterator algorithm based on problem size and alignment
"""
if self.conv_kind == ConvKind.Fprop:
# Check whether the fixed channel is applicable
if problem_size.C == alignment_a:
return IteratorAlgorithm.FixedChannels
elif (problem_size.C % alignment_a == 0 and
problem_size.R <= 32 and problem_size.S <= 32):
return IteratorAlgorithm.Optimized
else:
return IteratorAlgorithm.Analytic
elif self.conv_kind == ConvKind.Dgrad:
if (problem_size.K % alignment_a == 0 and
problem_size.R <= 32 and problem_size.S <= 32 and
problem_size.C % alignment_b == 0):
return IteratorAlgorithm.Optimized
else:
return IteratorAlgorithm.Analytic
elif self.conv_kind == ConvKind.Wgrad:
if (problem_size.K % alignment_a == 0 and
problem_size.C % alignment_b == 0):
return IteratorAlgorithm.Optimized
else:
return IteratorAlgorithm.Analytic
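    # Worked example (illustrative): for an fprop problem with C = 64, R = S = 3, and
    # alignment_a = 8, C % alignment_a == 0 and the filter fits in the 32x32 window, so
    # Optimized is proposed; if instead C == alignment_a (e.g. C = 8 with 8-element access),
    # FixedChannels is proposed; everything else falls back to Analytic.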
def _validate_iterator_algorithm(self, iterator_algorithm, problem_size, alignment_a, alignment_b) -> bool:
"""
        Validate whether the user-provided iterator algorithm works for the given problem size
"""
if self.conv_kind == ConvKind.Fprop:
if iterator_algorithm == IteratorAlgorithm.FixedChannels:
return problem_size.C == alignment_a
elif iterator_algorithm == IteratorAlgorithm.Optimized:
return (problem_size.C % alignment_a == 0 and
problem_size.R <= 32 and problem_size.S <= 32)
elif iterator_algorithm == IteratorAlgorithm.FewChannels:
return problem_size.C % alignment_a == 0
elif self.conv_kind == ConvKind.Dgrad:
if iterator_algorithm == IteratorAlgorithm.Optimized:
return (problem_size.K % alignment_a == 0 and
problem_size.R <= 32 and problem_size.S <= 32 and
problem_size.C % alignment_b == 0)
elif self.conv_kind == ConvKind.Wgrad:
if iterator_algorithm == IteratorAlgorithm.Optimized:
return (problem_size.K % alignment_a == 0 and
problem_size.C % alignment_b == 0)
return True
#
# Stride Support Related
#
def _propose_stride_support(self, stride):
if self.conv_kind == ConvKind.Dgrad:
if stride[0] == 1 and stride[1] == 1:
return StrideSupport.Unity
return StrideSupport.Strided
#
# Construct and Compilation
#
def construct(
self, tile_description: TileDescription = None,
alignment_A: int = None, alignment_B: int = None, alignment_C: int = None,
iterator_algorithm: IteratorAlgorithm = None,
stride_support = None, swizzling_functor: cutlass.swizzle = None,
epilogue_functor=None) -> cutlass.backend.Conv2dOperation:
"""
Constructs a ``cutlass.backend.Conv2dOperation`` based on the input parameters and current
kernel specification of the ``Conv2d`` object.
:param tile_description: tile description specifying shapes and operand types to use in the kernel
:type tile_description: cutlass.backend.TileDescription
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:param alignment_C: alignment of operand C
:type alignment_C: int
:param iterator_algorithm: the iterator algorithm used
:type iterator_algorithm: cutlass_library.library.IteratorAlgorithm
:param stride_support: the stride support of dgrad
:type stride_support: cutlass_library.library.StrideSupport
:param swizzling_functor: the swizzling functor
:type swizzling_functor: cutlass.swizzle
:param epilogue_functor: the epilogue functor
:return: operation that was constructed
:rtype: cutlass.backend.Conv2dOperation
"""
# Get alignment
alignment_A = check.alignment_or_default(alignment_A, self.alignment_pref_A)
alignment_B = check.alignment_or_default(alignment_B, self.alignment_pref_B)
alignment_C = check.alignment_or_default(alignment_C, self.alignment_pref_C)
        tensor_A = TensorDescription(self._element_a, self._layout_a, alignment_A)
tensor_B = TensorDescription(self._element_b, self._layout_b, alignment_B)
tensor_C = TensorDescription(self._element_c, self._layout_c, alignment_C)
if tile_description is None:
if self.tile_description is not None:
tile_description = self.tile_description
else:
op = self.possible_operations.operations(alignment_A, alignment_B, alignment_C, self._math_operation)[0]
tile_description = datatypes.td_from_profiler_op(op)
else:
valid, err_str = self._valid_tile_description(tile_description)
if not valid:
raise Exception(f"Invalid tile description. {err_str}")
self.tile_description = tile_description
if iterator_algorithm is None:
# If the iterator algorithm is already set
if self.iterator_algorithm is not None:
iterator_algorithm = self.iterator_algorithm
else:
# Otherwise, we conservatively use the analytic iterator for correctness
iterator_algorithm = IteratorAlgorithm.Analytic
if stride_support is None:
# If the stride support is already set
if self._stride_support is not None:
stride_support = self._stride_support
else:
# Otherwise, we assume strided
stride_support = StrideSupport.Strided
if swizzling_functor is None:
# If the swizzling functor is already set
swizzling_functor = self._propose_swizzling_functor(stride=(2, 2))
if epilogue_functor is None:
if self.epilogue_functor is not None:
epilogue_functor = self.epilogue_functor
else:
epilogue_functor = self._create_epilogue_functor_activation(self._activation)
# Reset the alignment of the epilogue functor
epilogue_functor = self._reset_epilogue_functor_alignment(alignment_C, epilogue_functor)
operation = Conv2dOperation(
conv_kind=self.conv_kind,
iterator_algorithm=iterator_algorithm,
arch=self.current_cc,
tile_description=tile_description,
A=tensor_A, B=tensor_B, C=tensor_C,
stride_support=stride_support,
epilogue_functor=epilogue_functor,
swizzling_functor=swizzling_functor,
)
return operation
def compile(self, tile_description: TileDescription = None,
alignment_A: int = None, alignment_B: int = None, alignment_C: int = None,
iterator_algorithm: IteratorAlgorithm = None,
stride_support = None, swizzling_functor: cutlass.swizzle = None,
epilogue_functor = None, print_module: bool = False) -> cutlass.backend.Conv2dOperation:
"""
Emits and compiles the kernel currently specified. If ``tile_description`` and any
of the ``alignment`` parameters are set, the kernel will be chosen using this
tile description and alignments. Otherwise, a default tile description and alignment
will be used.
        :param tile_description: tile description specifying shapes and operand types to use in the kernel
:type tile_description: cutlass.backend.TileDescription
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:param alignment_C: alignment of operand C
:type alignment_C: int
:param iterator_algorithm: the iterator algorithm used
:type iterator_algorithm: cutlass_library.library.IteratorAlgorithm
:param stride_support: the stride support of dgrad
:type stride_support: cutlass_library.library.StrideSupport
:param swizzling_functor: the swizzling functor
:type swizzling_functor: cutlass.swizzle
:param epilogue_functor: the epilogue functor
:return: operation that was compiled
:rtype: cutlass.backend.Conv2dOperation
"""
self.operation = self.construct(
tile_description, alignment_A, alignment_B, alignment_C,
iterator_algorithm, stride_support, swizzling_functor, epilogue_functor)
if print_module:
print(self.operation.rt_module.emit())
compiler.add_module([self.operation,])
return self.operation
#
# Run Related
#
def _verify_type_and_layout(self, tensor, ref_type, ref_layout, name):
"""
Verifies that ``tensor`` has data type ``ref_type`` and layout ``ref_layout``. An exception
is raised if it does not.
:param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in
:type tensor: numpy/cupy/torch array/tensor object
        :param ref_type: data type for the tensor that this object was initialized to
:param name: identifier of the tensor to verify. Used in raising exceptions
:type name: str
"""
dtype, _ = datatypes.get_datatype_and_layout(tensor)
if dtype != ref_type:
raise Exception(f'Tensor {name} with type and layout {dtype} '
f'does not match the expected type of {ref_type}.')
def _get_and_verify_conv_problem_size(self, A, B, C, stride, padding, dilation):
if self.conv_kind == ConvKind.Fprop:
input = A
weight = B
output = C
output_tensor = "C"
elif self.conv_kind == ConvKind.Dgrad:
output = A
weight = B
input = C
output_tensor = "A"
elif self.conv_kind == ConvKind.Wgrad:
output = A
input = B
weight = C
output_tensor = "A"
else:
raise Exception(f"Convolution kind {self.conv_kind} is not supported")
N_, H_, W_, C_ = datatypes.get_tensor_shape(input, op="CONV")
K_, R_, S_, _ = datatypes.get_tensor_shape(weight, op="CONV")
_, P_, Q_, _ = datatypes.get_tensor_shape(output, op="CONV")
problem_size = Conv2DProblemSize(
N_, H_, W_, C_,
K_, R_, S_, C_,
padding[0], padding[1],
stride[0], stride[1],
dilation[0], dilation[1],
ConvMode.CrossCorrelation,
1, 1
)
if P_ != problem_size.P or Q_ != problem_size.Q:
raise Exception(
f"Tensor {output_tensor} size should be ({N_}, {problem_size.P}, {problem_size.Q}, {K_}), got ({N_}, {P_}, {Q_}, {K_})")
return problem_size
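    # Example (illustrative): for fprop with A of shape (N, H, W, C) = (4, 16, 16, 32),
    # B of shape (K, R, S, C) = (64, 3, 3, 32), stride (1, 1), padding (0, 0), dilation (1, 1),
    # the output tensor C is expected to have shape (4, 14, 14, 64); any other P/Q raises the
    # exception above.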
def run(self, A=None, B=None, C=None, D=None,
stride=(1, 1), padding=(0, 0), dilation=(1, 1),
alpha=None, beta=None,
split_k=("serial", 1), sync: bool = True,
print_module: bool = False,
stream: cuda.CUstream = cuda.CUstream(0)) -> Conv2dArguments:
"""
Runs the kernel currently specified. If it has not already been, the kernel is emitted and
compiled. Tensors holding operands and outputs of the kernel are sourced either from the
``A``, ``B``, ``C``, ``D``, ``alpha``, and ``beta``
parameters provided in the call, or from those
passed in on the construction of this object -- one of the two must be specified.
By default, this call returns only once the kernel has completed. To launch the kernel
and immediately return, set ``sync=False``. In this case, it is the responsibility of the
        caller to synchronize the results of the kernel before attempting to access outputs
by calling ``sync()`` on the arguments returned from this call.
:param A: tensor representing data type and layout of operand A
:param B: tensor representing data type and layout of operand B
:param C: tensor representing data type and layout of operand C
:param D: tensor representing data type and layout of operand D
:param stride: (stride_h, stride_w) describing the convolution stride. Default: (1, 1)
:param padding: (pad_h, pad_w) describing the convolution padding. Default: (0, 0)
:param dilation: (dilation_h, dilation_w) describing the dilation of convolution. Default: (1, 1)
        :param alpha: scalar parameter alpha from GEMM computation that scales the product of operands A and B
:param beta: scalar parameter beta from GEMM operation that scales operand C
:param split_k: a tuple (split_k_mode, split_k_slices)
:param sync: whether the call should wait for the kernel to complete before returning
:type sync: bool
:param print_module: whether to print the emitted C++ code
:type print_module: bool
:param stream: cuda stream, defaults to cuda.cuda.CUstream(0)
:type stream: :class:`cuda.cuda.CUstream`
:return: arguments passed in to the kernel
:rtype: cutlass.backend.Conv2dArguments
"""
super().run_setup()
A = self._verify_tensor(A, self.A, self._element_a, self._layout_a, "A")
B = self._verify_tensor(B, self.B, self._element_b, self._layout_b, "B")
C = self._verify_tensor(C, self.C, self._element_c, self._layout_c, "C")
D = self._verify_tensor(D, self.D, self._element_d, self._layout_d, "D")
alpha = self._verify_scalar(alpha, self.alpha, self._element_c, "alpha")
beta = self._verify_scalar(beta, self.beta, self._element_c, "beta")
# handle the case when there is no C
if C is None:
if beta != 0:
raise Exception(f"With beta {beta} != 0, C has to be provided.")
else:
C = D
# Construct problem size based on input
# It also verifies whether the A, B, C, D, stride, padding, and dilation are matching
problem_size = self._get_and_verify_conv_problem_size(A, B, C, stride, padding, dilation)
# Propose stride support based on input
stride_support = self._propose_stride_support(stride)
# Propose swizzling functor
swizzling_functor = self._propose_swizzling_functor(stride)
shape_a = datatypes.get_tensor_shape(A, op="CONV")
shape_b = datatypes.get_tensor_shape(B, op="CONV")
shape_c = datatypes.get_tensor_shape(C, op="CONV")
# Get the alignment
alignment_a = self.possible_operations.find_alignment(shape_a, self._layout_a, operand="A")
alignment_b = self.possible_operations.find_alignment(shape_b, self._layout_b, operand="B")
alignment_c = self.possible_operations.find_alignment(shape_c, self._layout_c, operand="C")
alignment_a = check.update_alignment(alignment_a, self.alignment_pref_A)
alignment_b = check.update_alignment(alignment_b, self.alignment_pref_B)
alignment_c = check.update_alignment(alignment_c, self.alignment_pref_C)
# Propose iterator algorithm based on input
if self._iterator_algorithm is None:
# Propose a default iterator algorithm based on the problem size
iterator_algorithm = self._propose_iterator_algorithm(problem_size, alignment_a, alignment_b)
else:
if (self._validate_iterator_algorithm(self._iterator_algorithm, problem_size, alignment_a, alignment_b)):
iterator_algorithm = self._iterator_algorithm
else:
raise Exception(f"Iterator algorithm {self._iterator_algorithm} is invalid for current problem.")
epilogue_args = [alpha, beta]
if hasattr(self, "_activation_args"):
if isinstance(self._activation_args, list):
epilogue_args += self._activation_args
else:
epilogue_args.append(self._activation_args)
if split_k[0] == "parallel" and split_k[1] > 1:
epilogue_functor = self._create_epilogue_functor_activation(epilogue.identity)
else:
epilogue_functor = self.epilogue_functor
# The alignment is determined by the iterator function (I believe)
self.compile(tile_description=self.tile_description, alignment_A=alignment_a, alignment_B=alignment_b,
alignment_C=alignment_c, iterator_algorithm=iterator_algorithm, stride_support=stride_support,
swizzling_functor=swizzling_functor, epilogue_functor=epilogue_functor, print_module=print_module)
# Create reduction operation for parallel split-k
if split_k[0] == "parallel" and split_k[1] > 1:
epilogue_functor_reduction = self._reset_epilogue_functor_alignment(alignment_c, self.epilogue_functor)
self.reduction_operation = ReductionOperation(
shape=MatrixCoord(4, 32 * alignment_c), C=self.operation.C,
element_accumulator=self._element_accumulator,
element_compute=self._element_accumulator,
epilogue_functor=epilogue_functor_reduction,
count=alignment_c
)
if print_module:
print(self.reduction_operation.rt_module.emit())
compiler.add_module([self.reduction_operation,])
arguments = Conv2dArguments(
operation=self.operation, problem_size=problem_size,
A=A, B=B, C=C, D=D,
output_op=self.operation.epilogue_type(*epilogue_args),
split_k_mode=datatypes.getattr_enum(SplitKMode, split_k[0]),
split_k_slices=split_k[1],
stream=stream
)
self.operation.run(arguments)
if split_k[0] == "parallel" and split_k[1] > 1:
implicit_gemm_size = arguments.problem_size.implicit_gemm_size(self.conv_kind)
reduction_arguments = ReductionArguments(
self.reduction_operation,
problem_size=[implicit_gemm_size.m, implicit_gemm_size.n],
partitions=split_k[1],
workspace=arguments.ptr_D,
destination=D,
source=C,
output_op=self.reduction_operation.epilogue_type(*epilogue_args),
stream=stream
)
self.reduction_operation.run(reduction_arguments)
if sync:
if split_k[0] == "parallel" and split_k[1] > 1:
reduction_arguments.sync()
# Free memory allocated by args because we are not
# calling `arguments.sync()` in this case (which will free memory)
arguments.free()
else:
arguments.sync()
return arguments
#
# Helper functions
#
@staticmethod
def output_size(input_size, weight_size, padding, stride, dilation):
problem_size = Conv2DProblemSize(
*input_size,
*weight_size,
padding[0], padding[1],
stride[0], stride[1],
dilation[0], dilation[1],
ConvMode.CrossCorrelation,
1, 1
)
return (problem_size.N, problem_size.P, problem_size.Q, problem_size.K)
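    # Usage sketch (illustrative values): for an NHWC input of (1, 8, 8, 64), a KRSC weight of
    # (32, 3, 3, 64), padding (1, 1), stride (1, 1), and dilation (1, 1), this returns
    # (1, 8, 8, 32), i.e. (N, P, Q, K) with P = (H + 2*pad_h - dilation_h*(R - 1) - 1) // stride_h + 1:
    #
    #   Conv2d.output_size((1, 8, 8, 64), (32, 3, 3, 64), (1, 1), (1, 1), (1, 1))  # -> (1, 8, 8, 32)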
#
# Easy to use interfaces for fprop, wgrad, and dgrad
#
class Conv2dFprop(Conv2d):
def __init__(
self,
input=None, weight=None, C=None, output=None, alpha=1, beta=0,
element=None,
element_input=None, element_weight=None, element_C=None, element_output=None,
element_accumulator=None,
cc: int = None, kernel_cc: int = None):
A, B, D = input, weight, output
element_A, element_B, element_D = element_input, element_weight, element_output
super().__init__(
"fprop", A, B, C, D, alpha, beta, element,
element_A, element_B, element_C, element_D,
element_accumulator, cc, kernel_cc)
def run(
self, input=None, weight=None, C=None, output=None, alpha=None, beta=None,
stride=(1, 1), padding=(0, 0), dilation=(1, 1), split_k=("serial", 1),
sync: bool = True, print_module: bool = False,
stream: cuda.CUstream = cuda.CUstream(0)) -> Conv2dArguments:
A, B, D = input, weight, output
return super().run(
            A, B, C, D, stride=stride, padding=padding, dilation=dilation, alpha=alpha, beta=beta,
            split_k=split_k, sync=sync, print_module=print_module, stream=stream)
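# Usage sketch for the convenience wrappers (tensor names are hypothetical placeholders):
#
#   plan = Conv2dFprop(element=torch.float32)
#   plan.run(input=activations, weight=filters, C=bias_like, output=result,
#            stride=(1, 1), padding=(1, 1), dilation=(1, 1))
#
# where activations, filters, bias_like, and result are caller-allocated float32 tensors in
# NHWC / KRSC / NPQK layout, exactly as for Conv2d.run above.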
class Conv2dDgrad(Conv2d):
def __init__(
self,
grad_output=None, weight=None, C=None, grad_input=None, alpha=1, beta=0,
element=None,
element_grad_output=None, element_weight=None, element_C=None, element_grad_input=None,
element_accumulator=None,
cc: int = None, kernel_cc: int = None):
A, B, D = grad_output, weight, grad_input
element_A, element_B, element_D = element_grad_output, element_weight, element_grad_input
super().__init__(
"dgrad", A, B, C, D, alpha, beta, element,
element_A, element_B, element_C, element_D,
element_accumulator, cc, kernel_cc)
def run(self, grad_output=None, weight=None, C=None, grad_input=None, alpha=None, beta=None,
stride=(1, 1), padding=(0, 0), dilation=(1, 1), split_k=("serial", 1),
sync: bool = True, print_module: bool = False,
stream: cuda.CUstream = cuda.CUstream(0)) -> Conv2dArguments:
#
A, B, D = grad_output, weight, grad_input
return super().run(
            A, B, C, D, stride=stride, padding=padding, dilation=dilation, alpha=alpha, beta=beta,
            split_k=split_k, sync=sync, print_module=print_module, stream=stream)
class Conv2dWgrad(Conv2d):
def __init__(
self,
grad_output=None, input=None, C=None, grad_weight=None, alpha=1, beta=0,
element=None,
element_grad_output=None, element_input=None, element_C=None, element_grad_weight=None,
element_accumulator=None,
cc: int = None, kernel_cc: int = None):
A, B, D = grad_output, input, grad_weight
element_A, element_B, element_D = element_grad_output, element_input, element_grad_weight
super().__init__(
"wgrad", A, B, C, D, alpha, beta, element,
element_A, element_B, element_C, element_D,
element_accumulator, cc, kernel_cc)
def run(self, grad_output=None, input=None, C=None, grad_weight=None, alpha=None, beta=None,
stride=(1, 1), padding=(0, 0), dilation=(1, 1), split_k=("serial", 1),
sync: bool = True, print_module: bool = False,
stream: cuda.CUstream = cuda.CUstream(0)) -> Conv2dArguments:
#
A, B, D = grad_output, input, grad_weight
return super().run(
            A, B, C, D, stride=stride, padding=padding, dilation=dilation, alpha=alpha, beta=beta,
            split_k=split_k, sync=sync, print_module=print_module, stream=stream)
# (end of python/cutlass/op/conv.py)
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Data types and tags used for emitting CUTLASS C++ kernels
"""
import enum
import re
# The following block implements enum.auto() for Python 3.5 variants that don't include it such
# as the default 3.5.2 on Ubuntu 16.04.
#
# https://codereview.stackexchange.com/questions/177309/reimplementing-pythons-enum-auto-for-compatibility
try:
from enum import auto as enum_auto
except ImportError:
__cutlass_library_auto_enum = 0
def enum_auto() -> int:
global __cutlass_library_auto_enum
i = __cutlass_library_auto_enum
__cutlass_library_auto_enum += 1
return i
###################################################################################################
#
class GeneratorTarget(enum.Enum):
Library = enum_auto()
#
GeneratorTargetNames = {
GeneratorTarget.Library: 'library'
}
#
###################################################################################################
#
class DataType(enum.Enum):
void = enum_auto() # primarily used to disable C tensor for epilogues
b1 = enum_auto()
u2 = enum_auto()
u4 = enum_auto()
u8 = enum_auto()
u16 = enum_auto()
u32 = enum_auto()
u64 = enum_auto()
s4 = enum_auto()
s8 = enum_auto()
s16 = enum_auto()
s32 = enum_auto()
s64 = enum_auto()
e4m3 = enum_auto()
e5m2 = enum_auto()
f16 = enum_auto()
bf16 = enum_auto()
f32 = enum_auto()
tf32 = enum_auto()
f64 = enum_auto()
cf16 = enum_auto()
cbf16 = enum_auto()
cf32 = enum_auto()
ctf32 = enum_auto()
cf64 = enum_auto()
cs4 = enum_auto()
cs8 = enum_auto()
cs16 = enum_auto()
cs32 = enum_auto()
cs64 = enum_auto()
cu4 = enum_auto()
cu8 = enum_auto()
cu16 = enum_auto()
cu32 = enum_auto()
cu64 = enum_auto()
invalid = enum_auto()
#
ShortDataTypeNames = {
DataType.s32: 'i',
DataType.e4m3: 'e4m3',
DataType.e5m2: 'e5m2',
DataType.f16: 'h',
DataType.f32: 's',
DataType.f64: 'd',
DataType.cf32: 'c',
DataType.cf64: 'z',
}
#
DataTypeNames = {
DataType.void: "void",
DataType.b1: "b1",
DataType.u2: "u2",
DataType.u4: "u4",
DataType.u8: "u8",
DataType.u16: "u16",
DataType.u32: "u32",
DataType.u64: "u64",
DataType.s4: "s4",
DataType.s8: "s8",
DataType.s16: "s16",
DataType.s32: "s32",
DataType.s64: "s64",
DataType.e4m3: 'e4m3',
DataType.e5m2: 'e5m2',
DataType.f16: "f16",
DataType.bf16: "bf16",
DataType.f32: "f32",
DataType.tf32: "tf32",
DataType.f64: "f64",
DataType.cf16: "cf16",
DataType.cbf16: "cbf16",
DataType.cf32: "cf32",
DataType.ctf32: "ctf32",
DataType.cf64: "cf64",
DataType.cu4: "cu4",
DataType.cu8: "cu8",
DataType.cu16: "cu16",
DataType.cu32: "cu32",
DataType.cu64: "cu64",
DataType.cs4: "cs4",
DataType.cs8: "cs8",
DataType.cs16: "cs16",
DataType.cs32: "cs32",
DataType.cs64: "cs64",
}
DataTypeTag = {
DataType.void: "void",
DataType.b1: "cutlass::uint1b_t",
DataType.u2: "cutlass::uint2b_t",
DataType.u4: "cutlass::uint4b_t",
DataType.u8: "uint8_t",
DataType.u16: "uint16_t",
DataType.u32: "uint32_t",
DataType.u64: "uint64_t",
DataType.s4: "cutlass::int4b_t",
DataType.s8: "int8_t",
DataType.s16: "int16_t",
DataType.s32: "int32_t",
DataType.s64: "int64_t",
DataType.e4m3: 'cutlass::float_e4m3_t',
DataType.e5m2: 'cutlass::float_e5m2_t',
DataType.f16: "cutlass::half_t",
DataType.bf16: "cutlass::bfloat16_t",
DataType.f32: "float",
DataType.tf32: "cutlass::tfloat32_t",
DataType.f64: "double",
DataType.cf16: "cutlass::complex<cutlass::half_t>",
DataType.cbf16: "cutlass::complex<cutlass::bfloat16_t>",
DataType.cf32: "cutlass::complex<float>",
DataType.ctf32: "cutlass::complex<cutlass::tfloat32_t>",
DataType.cf64: "cutlass::complex<double>",
DataType.cu4: "cutlass::complex<cutlass::uint4b_t>",
DataType.cu8: "cutlass::complex<cutlass::uint8_t>",
DataType.cu16: "cutlass::complex<cutlass::uint16_t>",
DataType.cu32: "cutlass::complex<cutlass::uint32_t>",
DataType.cu64: "cutlass::complex<cutlass::uint64_t>",
DataType.cs4: "cutlass::complex<cutlass::int4b_t>",
DataType.cs8: "cutlass::complex<cutlass::int8_t>",
DataType.cs16: "cutlass::complex<cutlass::int16_t>",
DataType.cs32: "cutlass::complex<cutlass::int32_t>",
DataType.cs64: "cutlass::complex<cutlass::int64_t>",
}
DataTypeSize = {
DataType.void: 0,
DataType.b1: 1,
DataType.u2: 2,
DataType.u4: 4,
DataType.u8: 8,
DataType.u16: 16,
DataType.u32: 32,
DataType.u64: 64,
DataType.s4: 4,
DataType.s8: 8,
DataType.s16: 16,
DataType.s32: 32,
DataType.s64: 64,
DataType.e4m3: 8,
DataType.e5m2: 8,
DataType.f16: 16,
DataType.bf16: 16,
DataType.f32: 32,
DataType.tf32: 32,
DataType.f64: 64,
DataType.cf16: 32,
DataType.cbf16: 32,
DataType.cf32: 64,
DataType.ctf32: 32,
DataType.cf64: 128,
DataType.cu4: 8,
DataType.cu8: 16,
DataType.cu16: 32,
DataType.cu32: 64,
DataType.cu64: 128,
DataType.cs4: 8,
DataType.cs8: 16,
DataType.cs16: 32,
DataType.cs32: 64,
DataType.cs64: 128,
}
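# Example (illustrative): DataTypeSize is expressed in bits, so the number of elements that fit
# in a 128-bit vectorized access (the usual upper bound on alignment) can be computed as:
#
#   max_alignment_f16 = 128 // DataTypeSize[DataType.f16]    # -> 8 elements
#   max_alignment_tf32 = 128 // DataTypeSize[DataType.tf32]  # -> 4 elements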
###################################################################################################
#
class BlasMode(enum.Enum):
symmetric = enum_auto()
hermitian = enum_auto()
#
BlasModeTag = {
BlasMode.symmetric: 'cutlass::BlasMode::kSymmetric',
BlasMode.hermitian: 'cutlass::BlasMode::kHermitian',
}
#
class ComplexTransform(enum.Enum):
none = enum_auto()
conj = enum_auto()
#
ComplexTransformTag = {
ComplexTransform.none: 'cutlass::ComplexTransform::kNone',
ComplexTransform.conj: 'cutlass::ComplexTransform::kConjugate',
}
# Used for cutlass3x complex kernel collective mainloop builder instantiation
ComplexTransformTag3x = {
ComplexTransform.none: 'cute::identity',
ComplexTransform.conj: 'cute::conjugate',
}
#
RealComplexBijection = [
(DataType.f16, DataType.cf16),
(DataType.f32, DataType.cf32),
(DataType.f64, DataType.cf64),
]
#
def is_complex(data_type):
for r, c in RealComplexBijection:
if data_type == c:
return True
return False
#
def get_complex_from_real(real_type):
for r, c in RealComplexBijection:
if real_type == r:
return c
return DataType.invalid
#
def get_real_from_complex(complex_type):
for r, c in RealComplexBijection:
if complex_type == c:
return r
return DataType.invalid
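# Example (illustrative): RealComplexBijection pairs each real type with its complex counterpart,
# so get_complex_from_real(DataType.f32) returns DataType.cf32, get_real_from_complex(DataType.cf32)
# returns DataType.f32, and types without a counterpart (e.g. DataType.s8) yield DataType.invalid.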
#
class ComplexMultiplyOp(enum.Enum):
multiply_add = enum_auto()
gaussian = enum_auto()
###################################################################################################
#
class MathOperation(enum.Enum):
multiply_add = enum_auto()
multiply_add_saturate = enum_auto()
multiply_add_mixed_input_upcast = enum_auto()
xor_popc = enum_auto()
and_popc = enum_auto()
multiply_add_fast_bf16 = enum_auto()
multiply_add_fast_f16 = enum_auto()
multiply_add_fast_f32 = enum_auto()
multiply_add_complex_fast_f32 = enum_auto()
multiply_add_complex = enum_auto()
multiply_add_complex_gaussian = enum_auto()
multiply_add_fast_accum = enum_auto()
#
MathOperationTag = {
MathOperation.multiply_add: 'cutlass::arch::OpMultiplyAdd',
MathOperation.multiply_add_saturate: 'cutlass::arch::OpMultiplyAddSaturate',
MathOperation.multiply_add_mixed_input_upcast: 'cutlass::arch::OpMultiplyAddMixedInputUpcast',
MathOperation.xor_popc: 'cutlass::arch::OpXorPopc',
MathOperation.and_popc: 'cutlass::arch::OpAndPopc',
MathOperation.multiply_add_fast_bf16: 'cutlass::arch::OpMultiplyAddFastBF16',
MathOperation.multiply_add_fast_f16: 'cutlass::arch::OpMultiplyAddFastF16',
MathOperation.multiply_add_fast_f32: 'cutlass::arch::OpMultiplyAddFastF32',
MathOperation.multiply_add_complex_fast_f32: 'cutlass::arch::OpMultiplyAddComplexFastF32',
MathOperation.multiply_add_complex: 'cutlass::arch::OpMultiplyAddComplex',
MathOperation.multiply_add_complex_gaussian: 'cutlass::arch::OpMultiplyAddGaussianComplex',
MathOperation.multiply_add_fast_accum: 'cutlass::arch::OpMultiplyAddFastAccum',
}
###################################################################################################
#
class LayoutType(enum.Enum):
ColumnMajor = enum_auto()
RowMajor = enum_auto()
ColumnMajorInterleaved2 = enum_auto()
RowMajorInterleaved2 = enum_auto()
ColumnMajorInterleaved32 = enum_auto()
RowMajorInterleaved32 = enum_auto()
ColumnMajorInterleaved64 = enum_auto()
RowMajorInterleaved64 = enum_auto()
TensorNWC = enum_auto()
TensorNHWC = enum_auto()
TensorNDHWC = enum_auto()
TensorNCHW = enum_auto()
TensorNGHWC = enum_auto()
TensorNC32HW32 = enum_auto()
TensorNC64HW64 = enum_auto()
TensorC32RSK32 = enum_auto()
TensorC64RSK64 = enum_auto()
TensorKCS = enum_auto()
TensorKCSR = enum_auto()
TensorKCSRT = enum_auto()
#
LayoutTag = {
LayoutType.ColumnMajor: 'cutlass::layout::ColumnMajor',
LayoutType.RowMajor: 'cutlass::layout::RowMajor',
LayoutType.ColumnMajorInterleaved2: 'cutlass::layout::ColumnMajorInterleaved<2>',
LayoutType.RowMajorInterleaved2: 'cutlass::layout::RowMajorInterleaved<2>',
LayoutType.ColumnMajorInterleaved32: 'cutlass::layout::ColumnMajorInterleaved<32>',
LayoutType.RowMajorInterleaved32: 'cutlass::layout::RowMajorInterleaved<32>',
LayoutType.ColumnMajorInterleaved64: 'cutlass::layout::ColumnMajorInterleaved<64>',
LayoutType.RowMajorInterleaved64: 'cutlass::layout::RowMajorInterleaved<64>',
LayoutType.TensorNWC: 'cutlass::layout::TensorNWC',
LayoutType.TensorNHWC: 'cutlass::layout::TensorNHWC',
LayoutType.TensorNDHWC: 'cutlass::layout::TensorNDHWC',
LayoutType.TensorNCHW: 'cutlass::layout::TensorNCHW',
LayoutType.TensorNGHWC: 'cutlass::layout::TensorNGHWC',
LayoutType.TensorNC32HW32: 'cutlass::layout::TensorNCxHWx<32>',
LayoutType.TensorC32RSK32: 'cutlass::layout::TensorCxRSKx<32>',
LayoutType.TensorNC64HW64: 'cutlass::layout::TensorNCxHWx<64>',
LayoutType.TensorC64RSK64: 'cutlass::layout::TensorCxRSKx<64>',
LayoutType.TensorKCS: 'cutlass::layout::TensorKCS',
LayoutType.TensorKCSR: 'cutlass::layout::TensorKCSR',
LayoutType.TensorKCSRT: 'cutlass::layout::TensorKCSRT'
}
#
TransposedLayout = {
LayoutType.ColumnMajor: LayoutType.RowMajor,
LayoutType.RowMajor: LayoutType.ColumnMajor,
LayoutType.ColumnMajorInterleaved2: LayoutType.RowMajorInterleaved2,
LayoutType.RowMajorInterleaved2: LayoutType.ColumnMajorInterleaved2,
LayoutType.ColumnMajorInterleaved32: LayoutType.RowMajorInterleaved32,
LayoutType.RowMajorInterleaved32: LayoutType.ColumnMajorInterleaved32,
LayoutType.ColumnMajorInterleaved64: LayoutType.RowMajorInterleaved64,
LayoutType.RowMajorInterleaved64: LayoutType.ColumnMajorInterleaved64,
LayoutType.TensorNHWC: LayoutType.TensorNHWC
}
#
ShortLayoutTypeNames = {
LayoutType.ColumnMajor: 'n',
LayoutType.ColumnMajorInterleaved2: 'n2',
LayoutType.ColumnMajorInterleaved32: 'n32',
LayoutType.ColumnMajorInterleaved64: 'n64',
LayoutType.RowMajor: 't',
LayoutType.RowMajorInterleaved2: 't2',
LayoutType.RowMajorInterleaved32: 't32',
LayoutType.RowMajorInterleaved64: 't64',
LayoutType.TensorNWC: 'nwc',
LayoutType.TensorNHWC: 'nhwc',
LayoutType.TensorNDHWC: 'ndhwc',
LayoutType.TensorNCHW: 'nchw',
LayoutType.TensorNGHWC: 'nghwc',
LayoutType.TensorNC32HW32: 'nc32hw32',
LayoutType.TensorNC64HW64: 'nc64hw64',
LayoutType.TensorC32RSK32: 'c32rsk32',
LayoutType.TensorC64RSK64: 'c64rsk64',
LayoutType.TensorKCS: 'kcs',
LayoutType.TensorKCSR: 'kcsr',
LayoutType.TensorKCSRT: 'kcsrt'
}
#
ShortComplexLayoutNames = {
(LayoutType.ColumnMajor, ComplexTransform.none): 'n',
(LayoutType.ColumnMajor, ComplexTransform.conj): 'c',
(LayoutType.RowMajor, ComplexTransform.none): 't',
(LayoutType.RowMajor, ComplexTransform.conj): 'h'
}
###################################################################################################
class KernelScheduleType(enum.Enum):
ScheduleAuto = enum_auto()
Multistage = enum_auto()
CpAsyncWarpSpecialized = enum_auto()
CpAsyncWarpSpecializedPingpong = enum_auto()
CpAsyncWarpSpecializedCooperative = enum_auto()
Tma = enum_auto()
TmaWarpSpecialized = enum_auto()
TmaWarpSpecializedPingpong = enum_auto()
TmaWarpSpecializedCooperative = enum_auto()
TmaWarpSpecializedFP8FastAccum = enum_auto()
TmaWarpSpecializedCooperativeFP8FastAccum = enum_auto()
TmaWarpSpecializedPingpongFP8FastAccum = enum_auto()
ImplicitTmaWarpSpecializedSm90 = enum_auto()
#
KernelScheduleTag = {
KernelScheduleType.ScheduleAuto: 'cutlass::gemm::collective::KernelScheduleAuto',
KernelScheduleType.Multistage: 'cutlass::gemm::KernelMultistage',
KernelScheduleType.CpAsyncWarpSpecialized: 'cutlass::gemm::KernelCpAsyncWarpSpecialized',
KernelScheduleType.CpAsyncWarpSpecializedPingpong: 'cutlass::gemm::KernelCpAsyncWarpSpecializedPingpong',
KernelScheduleType.CpAsyncWarpSpecializedCooperative: 'cutlass::gemm::KernelCpAsyncWarpSpecializedCooperative',
KernelScheduleType.Tma: 'cutlass::gemm::KernelTma',
KernelScheduleType.TmaWarpSpecialized: 'cutlass::gemm::KernelTmaWarpSpecialized',
KernelScheduleType.TmaWarpSpecializedPingpong: 'cutlass::gemm::KernelTmaWarpSpecializedPingpong',
KernelScheduleType.TmaWarpSpecializedCooperative: 'cutlass::gemm::KernelTmaWarpSpecializedCooperative',
KernelScheduleType.TmaWarpSpecializedFP8FastAccum: 'cutlass::gemm::KernelTmaWarpSpecializedFP8FastAccum',
KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum: 'cutlass::gemm::KernelTmaWarpSpecializedCooperativeFP8FastAccum',
KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum: 'cutlass::gemm::KernelTmaWarpSpecializedPingpongFP8FastAccum',
KernelScheduleType.ImplicitTmaWarpSpecializedSm90: 'cutlass::conv::KernelImplicitTmaWarpSpecializedSm90',
}
#
KernelScheduleSuffixes = {
KernelScheduleType.ScheduleAuto: '',
KernelScheduleType.Multistage: '_cpasync',
KernelScheduleType.CpAsyncWarpSpecialized: '_cpasync_warpspecialized',
KernelScheduleType.CpAsyncWarpSpecializedPingpong: '_cpasync_warpspecialized_pingpong',
KernelScheduleType.CpAsyncWarpSpecializedCooperative: '_cpasync_warpspecialized_cooperative',
KernelScheduleType.Tma: '_unspecialized',
KernelScheduleType.TmaWarpSpecialized: '_warpspecialized',
KernelScheduleType.TmaWarpSpecializedPingpong: '_warpspecialized_pingpong',
KernelScheduleType.TmaWarpSpecializedCooperative: '_warpspecialized_cooperative',
KernelScheduleType.TmaWarpSpecializedFP8FastAccum: '_warpspecialized_fp8_fastaccum',
KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum: '_warpspecialized_cooperative_fp8_fastaccum',
KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum: '_warpspecialized_pingpong_fp8_fastaccum',
KernelScheduleType.ImplicitTmaWarpSpecializedSm90: '_warpspecialized',
}
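# Note (illustrative, not taken from this file): these suffixes are appended to generated kernel
# names to distinguish mainloop schedules, e.g. a TmaWarpSpecializedCooperative kernel name ends
# in '_warpspecialized_cooperative', while ScheduleAuto contributes no suffix.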
class EpilogueScheduleType(enum.Enum):
ScheduleAuto = enum_auto()
EpilogueTransposed = enum_auto()
NoSmemWarpSpecialized = enum_auto()
TmaWarpSpecialized = enum_auto()
TmaWarpSpecializedCooperative = enum_auto()
#
EpilogueScheduleTag = {
EpilogueScheduleType.ScheduleAuto: 'cutlass::epilogue::collective::EpilogueScheduleAuto',
EpilogueScheduleType.EpilogueTransposed: 'cutlass::gemm::EpilogueTransposed',
EpilogueScheduleType.NoSmemWarpSpecialized: 'cutlass::epilogue::NoSmemWarpSpecialized',
EpilogueScheduleType.TmaWarpSpecialized: 'cutlass::epilogue::TmaWarpSpecialized',
EpilogueScheduleType.TmaWarpSpecializedCooperative: 'cutlass::epilogue::TmaWarpSpecializedCooperative',
}
#
EpilogueScheduleSuffixes = {
EpilogueScheduleType.ScheduleAuto: '',
EpilogueScheduleType.EpilogueTransposed: '',
EpilogueScheduleType.NoSmemWarpSpecialized: '_epi_nosmem',
EpilogueScheduleType.TmaWarpSpecialized: '_epi_tma',
EpilogueScheduleType.TmaWarpSpecializedCooperative: '_epi_tma',
}
class EpilogueFunctor3x(enum.Enum):
LinearCombination = enum_auto()
#
EpilogueFunctor3xTag = {
EpilogueFunctor3x.LinearCombination: 'cutlass::epilogue::fusion::LinearCombination',
}
class TileSchedulerType(enum.Enum):
Default = enum_auto()
Persistent = enum_auto()
StreamK = enum_auto()
#
TileSchedulerTag = {
TileSchedulerType.Default: 'void',
TileSchedulerType.Persistent: 'cutlass::gemm::PersistentScheduler',
TileSchedulerType.StreamK: 'cutlass::gemm::StreamKScheduler',
}
#
TileSchedulerSuffixes = {
TileSchedulerType.Default: '',
TileSchedulerType.Persistent: '',
TileSchedulerType.StreamK: '_stream_k',
}
###################################################################################################
#
class SideMode(enum.Enum):
Left = enum_auto()
Right = enum_auto()
#
SideModeTag = {
SideMode.Left: 'cutlass::SideMode::kLeft',
SideMode.Right: 'cutlass::SideMode::kRight'
}
#
ShortSideModeNames = {
SideMode.Left: 'ls',
SideMode.Right: 'rs'
}
###################################################################################################
#
class FillMode(enum.Enum):
Lower = enum_auto()
Upper = enum_auto()
#
FillModeTag = {
FillMode.Lower: 'cutlass::FillMode::kLower',
FillMode.Upper: 'cutlass::FillMode::kUpper'
}
#
ShortFillModeNames = {
FillMode.Lower: 'l',
FillMode.Upper: 'u'
}
###################################################################################################
#
class DiagType(enum.Enum):
NonUnit = enum_auto()
Unit = enum_auto()
#
DiagTypeTag = {
DiagType.NonUnit: 'cutlass::DiagType::kNonUnit',
DiagType.Unit: 'cutlass::DiagType::kUnit'
}
#
ShortDiagTypeNames = {
DiagType.NonUnit: 'nu',
DiagType.Unit: 'un'
}
###################################################################################################
#
class OpcodeClass(enum.Enum):
Simt = enum_auto()
TensorOp = enum_auto()
WmmaTensorOp = enum_auto()
SparseTensorOp = enum_auto()
OpcodeClassNames = {
OpcodeClass.Simt: 'simt',
OpcodeClass.TensorOp: 'tensorop',
OpcodeClass.WmmaTensorOp: 'wmma_tensorop',
}
OpcodeClassTag = {
OpcodeClass.Simt: 'cutlass::arch::OpClassSimt',
OpcodeClass.TensorOp: 'cutlass::arch::OpClassTensorOp',
OpcodeClass.WmmaTensorOp: 'cutlass::arch::OpClassWmmaTensorOp',
}
###################################################################################################
#
class OperationKind(enum.Enum):
Gemm = enum_auto()
RankK = enum_auto()
Rank2K = enum_auto()
Trmm = enum_auto()
Symm = enum_auto()
Conv2d = enum_auto()
Conv3d = enum_auto()
#
OperationKindNames = {
OperationKind.Gemm: 'gemm'
, OperationKind.RankK: 'rank_k'
, OperationKind.Rank2K: 'rank_2k'
, OperationKind.Trmm: 'trmm'
, OperationKind.Symm: 'symm'
, OperationKind.Conv2d: 'conv2d'
, OperationKind.Conv3d: 'conv3d'
}
#
class Target(enum.Enum):
library = enum_auto()
#
ArchitectureNames = {
50: 'maxwell',
60: 'pascal',
61: 'pascal',
70: 'volta',
75: 'turing',
80: 'ampere',
89: 'ada',
90: 'hopper'
}
#
SharedMemPerCC = {
70: 96, # 96KB of SMEM
72: 96, # 96KB of SMEM
75: 64, # 64KB of SMEM
80: 163, # 164KB of SMEM - 1KB reserved for the driver
86: 99, # 100KB of SMEM - 1KB reserved for the driver
87: 163, # 164KB of SMEM - 1KB reserved for the driver
89: 99, # 100KB of SMEM - 1KB reserved for the driver
90: 227, # 228KB of SMEM - 1KB reserved for the driver
}
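# Illustrative lookup (not taken from this file): SharedMemPerCC[80] -> 163, i.e. the shared
# memory budget in KB that a generated kernel may assume per CTA on an SM80 target.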
###################################################################################################
#
def SubstituteTemplate(template, values):
text = template
changed = True
while changed:
changed = False
for key, value in values.items():
regex = "\\$\\{%s\\}" % key
newtext = re.sub(regex, value, text)
if newtext != text:
changed = True
text = newtext
return text
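# Illustrative usage (hypothetical values, not taken from this file): placeholders of the form
# ${key} are substituted repeatedly until the text stops changing, so values that themselves
# contain placeholders also resolve.
#
#   SubstituteTemplate("cutlass_${opcode_class}_${operation}",
#                      {"opcode_class": "tensorop", "operation": "s16816gemm"})
#   # -> "cutlass_tensorop_s16816gemm"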
###################################################################################################
#
class GemmKind(enum.Enum):
Gemm = enum_auto()
Sparse = enum_auto()
Universal = enum_auto()
Universal3x = enum_auto()
SparseUniversal3x = enum_auto()
PlanarComplex = enum_auto()
PlanarComplexArray = enum_auto()
Grouped = enum_auto()
#
GemmKindNames = {
GemmKind.Gemm: "gemm",
GemmKind.Sparse: "spgemm",
GemmKind.Universal: "gemm",
GemmKind.Universal3x: "gemm",
GemmKind.SparseUniversal3x: "spgemm",
GemmKind.PlanarComplex: "gemm_planar_complex",
GemmKind.PlanarComplexArray: "gemm_planar_complex_array",
GemmKind.Grouped: "gemm_grouped",
}
#
class RankKKind(enum.Enum):
Universal = enum_auto()
#
RankKKindNames = {
RankKKind.Universal: "rank_k"
}
#
class TrmmKind(enum.Enum):
Universal = enum_auto()
#
TrmmKindNames = {
TrmmKind.Universal: "trmm"
}
#
class SymmKind(enum.Enum):
Universal = enum_auto()
#
SymmKindNames = {
SymmKind.Universal: "symm"
}
#
class EpilogueFunctor(enum.Enum):
LinearCombination = enum_auto()
LinearCombinationClamp = enum_auto()
#
EpilogueFunctorTag = {
EpilogueFunctor.LinearCombination: 'cutlass::epilogue::thread::LinearCombination',
EpilogueFunctor.LinearCombinationClamp: 'cutlass::epilogue::thread::LinearCombinationClamp',
}
#
class SwizzlingFunctor(enum.Enum):
Identity1 = enum_auto()
Identity2 = enum_auto()
Identity4 = enum_auto()
Identity8 = enum_auto()
Horizontal = enum_auto()
StridedDgradIdentity1 = enum_auto()
StridedDgradIdentity4 = enum_auto()
StridedDgradHorizontal = enum_auto()
StreamK = enum_auto()
#
SwizzlingFunctorTag = {
SwizzlingFunctor.Identity1: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>',
SwizzlingFunctor.Identity2: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<2>',
SwizzlingFunctor.Identity4: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<4>',
SwizzlingFunctor.Identity8: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>',
SwizzlingFunctor.Horizontal: 'cutlass::gemm::threadblock::GemmHorizontalThreadblockSwizzle',
SwizzlingFunctor.StridedDgradIdentity1: 'cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<1>',
SwizzlingFunctor.StridedDgradIdentity4: 'cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<4>',
SwizzlingFunctor.StridedDgradHorizontal: 'cutlass::conv::threadblock::StridedDgradHorizontalThreadblockSwizzle',
SwizzlingFunctor.StreamK: 'cutlass::gemm::threadblock::ThreadblockSwizzleStreamK',
}
#
class GroupScheduleMode(enum.Enum):
Device = enum_auto()
Host = enum_auto()
#
GroupScheduleModeTag = {
GroupScheduleMode.Device: 'cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly',
GroupScheduleMode.Host: 'cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute'
}
#
ShortGroupScheduleModeNames = {
GroupScheduleMode.Device: 'Device',
GroupScheduleMode.Host: 'Host'
}
###################################################################################################
#
class ConvKind(enum.IntEnum):
Fprop = 0
Dgrad = 1
Wgrad = 2
#
ConvKindTag = {
ConvKind.Fprop: 'cutlass::conv::Operator::kFprop',
ConvKind.Dgrad: 'cutlass::conv::Operator::kDgrad',
ConvKind.Wgrad: 'cutlass::conv::Operator::kWgrad'
}
ConvKindNames = {
ConvKind.Fprop: 'fprop',
ConvKind.Dgrad: 'dgrad',
ConvKind.Wgrad: 'wgrad',
}
class ConvMode(enum.IntEnum):
CrossCorrelation = 0
Convolution = 1
#
class IteratorAlgorithm(enum.Enum):
Analytic = 0
Optimized = 1
FixedChannels = 2
FewChannels = 3
FixedStrideDilation = 4
#
IteratorAlgorithmTag = {
IteratorAlgorithm.Analytic: 'cutlass::conv::IteratorAlgorithm::kAnalytic',
IteratorAlgorithm.Optimized: 'cutlass::conv::IteratorAlgorithm::kOptimized',
IteratorAlgorithm.FixedChannels: 'cutlass::conv::IteratorAlgorithm::kFixedChannels',
IteratorAlgorithm.FewChannels: 'cutlass::conv::IteratorAlgorithm::kFewChannels',
IteratorAlgorithm.FixedStrideDilation: 'cutlass::conv::IteratorAlgorithm::kFixedStrideDilation'
}
IteratorAlgorithmNames = {
IteratorAlgorithm.Analytic: 'analytic',
IteratorAlgorithm.Optimized: 'optimized',
IteratorAlgorithm.FixedChannels: 'fixed_channels',
IteratorAlgorithm.FewChannels: 'few_channels',
IteratorAlgorithm.FixedStrideDilation: 'fixed_stride_dilation'
}
#
class StrideSupport(enum.Enum):
Strided = 0
Unity = 1
Fixed = 2
#
StrideSupportTag = {
StrideSupport.Strided: 'cutlass::conv::StrideSupport::kStrided',
StrideSupport.Unity: 'cutlass::conv::StrideSupport::kUnity',
StrideSupport.Fixed: 'cutlass::conv::StrideSupport::kFixed'
}
StrideSupportNames = {
StrideSupport.Strided: '',
StrideSupport.Unity: 'unity_stride',
StrideSupport.Fixed: 'fixed_stride'
}
#
class GroupMode(enum.Enum):
NoneGroup = enum_auto() # dense conv (G=1)
SingleGroup = enum_auto() # grouped convolution (single group per CTA)
MultipleGroup = enum_auto() # grouped convolution (multiple groups per CTA)
Depthwise = enum_auto() # depthwise convolution (C=K=G)
#
GroupModeTag = {
GroupMode.NoneGroup: 'cutlass::conv::GroupMode::kNone',
GroupMode.SingleGroup: 'cutlass::conv::GroupMode::kSingleGroup',
GroupMode.MultipleGroup: 'cutlass::conv::GroupMode::kMultipleGroup',
GroupMode.Depthwise: 'cutlass::conv::GroupMode::kDepthwise',
}
GroupModeNames = {
GroupMode.NoneGroup: '',
GroupMode.SingleGroup: 'single_group',
GroupMode.MultipleGroup: 'multiple_group',
GroupMode.Depthwise: 'depthwise',
}
###################################################################################################
#
class MathInstruction:
def __init__(self,
instruction_shape, \
element_a, element_b, element_accumulator, \
opcode_class, math_operation = MathOperation.multiply_add \
):
self.instruction_shape = instruction_shape
self.element_a = element_a
self.element_b = element_b
self.element_accumulator = element_accumulator
self.opcode_class = opcode_class
self.math_operation = math_operation
#
class TileDescription:
def __init__(self, threadblock_shape, stages, warp_count, math_instruction, min_compute, max_compute, cluster_shape = [1,1,1]):
self.threadblock_shape = threadblock_shape
self.tile_shape = threadblock_shape
self.stages = stages
self.warp_count = warp_count
self.math_instruction = math_instruction
self.minimum_compute_capability = min_compute
self.maximum_compute_capability = max_compute
self.cluster_shape = cluster_shape
def procedural_name(self):
if self.minimum_compute_capability >= 90:
return "{tbm}x{tbn}x{tbk}_{cm}x{cn}x{ck}_{s}".format(
tbm = self.threadblock_shape[0],
tbn = self.threadblock_shape[1],
tbk = self.threadblock_shape[2],
cm = self.cluster_shape[0],
cn = self.cluster_shape[1],
ck = self.cluster_shape[2],
s = self.stages)
else:
return "%dx%d_%dx%d" % (self.threadblock_shape[0], self.threadblock_shape[1], self.threadblock_shape[2], self.stages)
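# Illustrative outputs (hypothetical shapes, not taken from this file):
#   min_compute >= 90: threadblock [128,128,64], cluster [2,1,1], 0 stages -> "128x128x64_2x1x1_0"
#   otherwise:         threadblock [128,128,32], 2 stages                  -> "128x128_32x2"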
#
class Direct2dConvFixedStrideDilationTileDescription:
def __init__(self, threadblock_output_shape, filter_shape, stages, stride, dilation, warp_count, math_instruction, min_compute, max_compute):
self.threadblock_shape = [threadblock_output_shape[0]*threadblock_output_shape[1]*threadblock_output_shape[2], threadblock_output_shape[3], filter_shape[0]*filter_shape[1]]
self.threadblock_output_shape = threadblock_output_shape
self.filter_shape = filter_shape
self.stages = stages
self.warp_count = warp_count
self.stride = stride
self.dilation = dilation
self.math_instruction = math_instruction
self.minimum_compute_capability = min_compute
self.maximum_compute_capability = max_compute
def procedural_name(self):
str_name = "%dx%dx%d_%dx%dx%dx%d_%d_filter%dx%d" % (self.threadblock_shape[0],
self.threadblock_shape[1],
self.threadblock_shape[2],
self.threadblock_output_shape[0],
self.threadblock_output_shape[1],
self.threadblock_output_shape[2],
self.threadblock_output_shape[3],
self.stages,
self.filter_shape[0],
self.filter_shape[1])
# Fixed stride and dilation
if self.stride != [-1, -1] and self.dilation != [-1, -1]:
str_name += "_stride%dx%d_dilation%dx%d" % (self.stride[0],
self.stride[1],
self.dilation[0],
self.dilation[1])
return str_name
#
class TensorDescription:
def __init__(self, element, layout, alignment = 1, complex_transform = ComplexTransform.none):
self.element = element
self.layout = layout
self.alignment = alignment
self.complex_transform = complex_transform
#
class SymmetricTensorDescription:
def __init__(self, element, layout, fill_mode, alignment = 1, complex_transform = ComplexTransform.none, side_mode = SideMode.Left):
self.element = element
self.layout = layout
self.fill_mode = fill_mode
self.alignment = alignment
self.complex_transform = complex_transform
self.side_mode = side_mode
#
class TriangularTensorDescription:
def __init__(self, element, layout, side_mode, fill_mode, diag_type, alignment = 1, complex_transform = ComplexTransform.none):
self.element = element
self.layout = layout
self.side_mode = side_mode
self.fill_mode = fill_mode
self.diag_type = diag_type
self.alignment = alignment
self.complex_transform = complex_transform
#
def CalculateSmemUsage(operation):
cta_shape = operation.tile_description.threadblock_shape
stages = operation.tile_description.stages
if operation.operation_kind == OperationKind.Gemm and operation.gemm_kind == GemmKind.Sparse:
# Elements represented by 8 bits of metadata (based on 4:8, 2:4 or 1:2 sparsity)
if DataTypeSize[operation.A.element] == 32:
elements_per_8b_md = 2
elif DataTypeSize[operation.A.element] == 4:
elements_per_8b_md = 8
else:
elements_per_8b_md = 4
smem_per_stage = DataTypeSize[operation.A.element] * cta_shape[0] * (cta_shape[2] // 2) // 8 + \
DataTypeSize[operation.B.element] * cta_shape[1] * cta_shape[2] // 8 + \
cta_shape[0] * (cta_shape[2] // 2) // elements_per_8b_md
else:
# A few BLAS3 operations only have an A tensor, so B's element size defaults to A's
data_type_size_a = DataTypeSize[operation.A.element]
data_type_size_b = DataTypeSize[operation.A.element]
if operation.is_mixed_input():
data_type_size_b = DataTypeSize[operation.B.element]
smem_per_stage = data_type_size_a * cta_shape[0] * cta_shape[2] // 8 + \
data_type_size_b * cta_shape[1] * cta_shape[2] // 8
smem_usage = smem_per_stage * stages
return (smem_usage >> 10)
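# Illustrative arithmetic (hypothetical operation, not taken from this file): a dense f16 GEMM
# with a 128x128x32 threadblock and 3 stages gives
#   smem_per_stage = 16*128*32//8 + 16*128*32//8 = 16384 bytes
#   smem_usage     = 16384 * 3 = 49152 bytes, so the function returns 49152 >> 10 = 48 (KB).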
class GemmUniversalMode(enum.IntEnum):
"""
Types corresponding to GemmUniversalMode
"""
Gemm = 0
GemmSplitKParallel = 1
Batched = 2
Array = 3
class SplitKMode(enum.IntEnum):
"""
Types corresponding to SplitKMode
"""
NoneSplitK = 0
Serial = 1
Parallel = 2
| python/cutlass_library/library.py/0 | {
"file_path": "python/cutlass_library/library.py",
"repo_id": "python",
"token_count": 13414
} | 41 |
Examples
==================
.. toctree::
:maxdepth: 5
Basic GEMM <externals/00_basic_gemm.nblink>
Epilogue <externals/01_epilogue.nblink>
PyTorch Extension <externals/02_pytorch_extension_grouped_gemm.nblink>
| python/docs_src/source/examples.rst/0 | {
"file_path": "python/docs_src/source/examples.rst",
"repo_id": "python",
"token_count": 95
} | 42 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide Implicit GEMM interface
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h"
#include "cutlass/epilogue/thread/linear_combination_residual_block.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/conv/kernel/default_conv2d_fprop_with_broadcast.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "conv2d_with_broadcast_testbed.h"
#if defined(CUTLASS_ARCH_MMA_SM70_SUPPORTED)
// Test residual block fusion: UnaryOp(BinaryOp(ActivationOp(Conv2d(X) + bias), residual))
// LinearCombinationResidualBlock does not support the split-k mode unless ActivationOp is Identity.
// This is because the activation needs to be applied to the fully accumulated output of the Conv2d op,
// which only the last thread block would have access to, before applying BinaryOp.
// The epilogue functor in the last thread block would have to be given three inputs, namely
// partial outputs, bias, and residual, but this is not supported in the current interface.
// Set TestSplitK = false to skip split-k tests with non-trivial ActivationOp.
template <
typename ElementAccumulator,
template<typename T> class ActivationOp,
template<typename T> class BinaryOp,
template<typename T> class UnaryOp,
bool TestSplitK = false
>
void Conv2dFpropSM70TestResidualBlock() {
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using ElementD = ElementC;
using ElementCompute = ElementAccumulator;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationResidualBlock<
ElementD,
ElementAccumulator,
ElementCompute,
ElementC,
8,
ActivationOp,
BinaryOp,
UnaryOp
>;
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFpropWithBroadcast<
ElementA, cutlass::layout::TensorNHWC,
ElementB, cutlass::layout::TensorNHWC,
ElementC, cutlass::layout::TensorNHWC,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm70,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<8, 8, 4>,
EpilogueOutputOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
2,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::IteratorAlgorithm::kOptimized
>::Kernel;
using Conv2dFprop = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
struct ReferenceOp {
using OutputOp = typename Conv2dFprop::EpilogueOutputOp;
using ElementZ = typename OutputOp::ElementZ;
ActivationOp<ElementCompute> activation;
BinaryOp<ElementCompute> binary_op;
UnaryOp<ElementCompute> unary_op;
void operator()(ElementZ &Z, ElementZ&, ElementCompute conv2d, ElementCompute residual) {
Z = ElementZ(unary_op(binary_op(activation(conv2d), residual)));
}
};
bool passed = test::conv::device::TestAllConv2dWithBroadcast<Conv2dFprop, ReferenceOp, true, TestSplitK>();
EXPECT_TRUE(passed);
}
TEST(SM70_Device_Conv2d_Fprop_With_Residual_Block_Plus_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32,
128x128_32x2_64x64x32) {
// Resnet
Conv2dFpropSM70TestResidualBlock<float, cutlass::epilogue::thread::ReLu, cutlass::plus, cutlass::epilogue::thread::Identity>();
}
////////////////////////////////////////////////////////////////////////////////
#endif // CUTLASS_ARCH_MMA_SM70_SUPPORTED
////////////////////////////////////////////////////////////////////////////////
| test/unit/conv/device/conv2d_fprop_with_broadcast_sm70.cu/0 | {
"file_path": "test/unit/conv/device/conv2d_fprop_with_broadcast_sm70.cu",
"repo_id": "test",
"token_count": 1744
} | 43 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for functional operators.
*/
#include "../common/cutlass_unit_test.h"
#include "cutlass/functional.h"
#include "cutlass/core_io.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/util/host_tensor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace core {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Conversion template
template <typename Element, typename Operator>
__global__ void unary_operator(Element *d, Element const *a) {
Operator op;
*d = op(*a);
}
/// Conversion template
template <typename Element, typename Operator>
__global__ void binary_operator(Element *d, Element const *a, Element const *b, int Iterations = 1) {
Operator op;
Element a_x = *a;
Element b_x = *b;
CUTLASS_PRAGMA_NO_UNROLL
for (int i = 0; i < Iterations; ++i) {
b_x = op(a_x, b_x);
}
*d = b_x;
}
/// Conversion template
template <typename Element, typename Operator>
__global__ void trinary_operator(
Element *d,
Element const *a,
Element const *b,
Element const *c,
int Iterations = 1) {
Operator op;
Element a_x = a[blockIdx.x];
Element b_x = b[blockIdx.x];
Element c_x = c[blockIdx.x];
CUTLASS_PRAGMA_NO_UNROLL
for (int i = 0; i < Iterations; ++i) {
c_x = op(a_x, b_x, c_x);
}
d[blockIdx.x] = c_x;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace core
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
template <int kN>
void Functional_plus_f16xN() {
using Element = cutlass::Array<cutlass::half_t, kN>;
using Operator = cutlass::plus<Element>;
using Tensor = cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor>;
Tensor D({1, kN});
Tensor A({1, kN});
Tensor B({1, kN});
Tensor C({1, kN});
for (int i = 0; i < kN; ++i) {
A.host_data()[i] = cutlass::half_t((i * 2 + 1) % 5);
B.host_data()[i] = cutlass::half_t((i * 4 + 8) % 7);
D.host_data()[i] = cutlass::half_t(0);
}
D.sync_device();
A.sync_device();
B.sync_device();
test::core::kernel::binary_operator<Element, Operator><<< dim3(1,1), dim3(1,1) >>>(
reinterpret_cast<Element *>(D.device_data()),
reinterpret_cast<Element const *>(A.device_data()),
reinterpret_cast<Element const *>(B.device_data())
);
D.sync_host();
bool some_d_nonzero = false;
for (int i = 0; i < kN; ++i) {
float a = float(A.host_data()[i]);
float b = float(B.host_data()[i]);
float d = float(D.host_data()[i]);
EXPECT_TRUE(d == (a + b));
if (d != 0) {
some_d_nonzero = true;
}
}
EXPECT_TRUE(some_d_nonzero);
}
TEST(Functional, plus_f16x16) {
Functional_plus_f16xN<16>();
}
TEST(Functional, plus_f16x17) {
Functional_plus_f16xN<17>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <int kN>
void Functional_minus_f16xN() {
using Element = cutlass::Array<cutlass::half_t, kN>;
using Operator = cutlass::minus<Element>;
using Tensor = cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor>;
Tensor D({1, kN});
Tensor A({1, kN});
Tensor B({1, kN});
Tensor C({1, kN});
for (int i = 0; i < kN; ++i) {
A.host_data()[i] = cutlass::half_t((i * 2 + 1) % 5);
B.host_data()[i] = cutlass::half_t((i * 4 + 8) % 7);
D.host_data()[i] = cutlass::half_t(0);
}
D.sync_device();
A.sync_device();
B.sync_device();
test::core::kernel::binary_operator<Element, Operator><<< dim3(1,1), dim3(1,1) >>>(
reinterpret_cast<Element *>(D.device_data()),
reinterpret_cast<Element const *>(A.device_data()),
reinterpret_cast<Element const *>(B.device_data())
);
D.sync_host();
bool some_d_nonzero = false;
for (int i = 0; i < kN; ++i) {
float a = float(A.host_data()[i]);
float b = float(B.host_data()[i]);
float d = float(D.host_data()[i]);
EXPECT_TRUE(d == (a - b));
if (d != 0) {
some_d_nonzero = true;
}
}
EXPECT_TRUE(some_d_nonzero);
}
TEST(Functional, minus_f16x16) {
Functional_minus_f16xN<16>();
}
TEST(Functional, minus_f16x17) {
Functional_minus_f16xN<17>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <int kN>
void Functional_multiplies_f16xN() {
using Element = cutlass::Array<cutlass::half_t, kN>;
using Operator = cutlass::multiplies<Element>;
using Tensor = cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor>;
Tensor D({1, kN});
Tensor A({1, kN});
Tensor B({1, kN});
Tensor C({1, kN});
for (int i = 0; i < kN; ++i) {
A.host_data()[i] = cutlass::half_t((i * 2 + 1) % 5);
B.host_data()[i] = cutlass::half_t((i * 4 + 8) % 7);
D.host_data()[i] = cutlass::half_t(0);
}
D.sync_device();
A.sync_device();
B.sync_device();
test::core::kernel::binary_operator<Element, Operator><<< dim3(1,1), dim3(1,1) >>>(
reinterpret_cast<Element *>(D.device_data()),
reinterpret_cast<Element const *>(A.device_data()),
reinterpret_cast<Element const *>(B.device_data())
);
D.sync_host();
bool some_d_nonzero = false;
for (int i = 0; i < kN; ++i) {
float a = float(A.host_data()[i]);
float b = float(B.host_data()[i]);
float d = float(D.host_data()[i]);
EXPECT_TRUE(d == (a * b));
if (d != 0) {
some_d_nonzero = true;
}
}
EXPECT_TRUE(some_d_nonzero);
}
TEST(Functional, multiplies_f16x16) {
Functional_multiplies_f16xN<16>();
}
TEST(Functional, multiplies_f16x17) {
Functional_multiplies_f16xN<17>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <int kN>
void Functional_divides_f16xN() {
using Element = cutlass::Array<cutlass::half_t, kN>;
using Operator = cutlass::divides<Element>;
using Tensor = cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor>;
Tensor D({1, kN});
Tensor A({1, kN});
Tensor B({1, kN});
Tensor C({1, kN});
for (int i = 0; i < kN; ++i) {
A.host_data()[i] = cutlass::half_t((i * 2 + 1) % 5);
B.host_data()[i] = cutlass::half_t((i * 4 + 8) % 7);
D.host_data()[i] = cutlass::half_t(0);
}
D.sync_device();
A.sync_device();
B.sync_device();
test::core::kernel::binary_operator<Element, Operator><<< dim3(1,1), dim3(1,1) >>>(
reinterpret_cast<Element *>(D.device_data()),
reinterpret_cast<Element const *>(A.device_data()),
reinterpret_cast<Element const *>(B.device_data())
);
D.sync_host();
bool some_d_nonzero = false;
for (int i = 0; i < kN; ++i) {
float a = float(A.host_data()[i]);
float b = float(B.host_data()[i]);
float d = float(D.host_data()[i]);
float expected = a / b;
float const kThreshold = 0.0005f;
if (std::isnan(expected)) {
EXPECT_TRUE(std::isnan(d));
}
else if (std::isinf(expected)) {
EXPECT_TRUE(std::isinf(d));
}
else {
EXPECT_TRUE(std::abs(d - expected) < kThreshold)
<< "Got: " << d << " = " << a << " / " << b << ", expected: " << (a / b);
}
if (d != 0) {
some_d_nonzero = true;
}
}
EXPECT_TRUE(some_d_nonzero);
}
TEST(Functional, divides_f16x16) {
Functional_divides_f16xN<16>();
}
TEST(Functional, divides_f16x17) {
Functional_divides_f16xN<17>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T, int kN>
void Functional_multiply_add_TxN() {
using Element = cutlass::Array<T, kN>;
using Operator = cutlass::multiply_add<Element>;
using Tensor = cutlass::HostTensor<T, cutlass::layout::RowMajor>;
Tensor D({1, kN});
Tensor A({1, kN});
Tensor B({1, kN});
Tensor C({1, kN});
for (int i = 0; i < kN; ++i) {
A.host_data()[i] = T((i * 2 + 1) % 5);
B.host_data()[i] = T((i * 4 + 8) % 7);
C.host_data()[i] = T((i * 3 + 11) % 11);
D.host_data()[i] = T(0);
}
D.sync_device();
A.sync_device();
B.sync_device();
C.sync_device();
test::core::kernel::trinary_operator<Element, Operator><<< dim3(1,1), dim3(1,1) >>>(
reinterpret_cast<Element *>(D.device_data()),
reinterpret_cast<Element const *>(A.device_data()),
reinterpret_cast<Element const *>(B.device_data()),
reinterpret_cast<Element const *>(C.device_data())
);
D.sync_host();
bool some_d_nonzero = false;
for (int i = 0; i < kN; ++i) {
float a = float(A.host_data()[i]);
float b = float(B.host_data()[i]);
float c = float(C.host_data()[i]);
float d = float(D.host_data()[i]);
EXPECT_TRUE(d == (a * b + c));
if (d != 0) {
some_d_nonzero = true;
}
}
EXPECT_TRUE(some_d_nonzero);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Functional, multiply_add_f16x16) {
Functional_multiply_add_TxN<cutlass::half_t, 16>();
}
TEST(Functional, multiply_add_f16x17) {
Functional_multiply_add_TxN<cutlass::half_t, 17>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Functional, multiply_add_bf16x16) {
Functional_multiply_add_TxN<cutlass::bfloat16_t, 16>();
}
TEST(Functional, multiply_add_bf16x17) {
Functional_multiply_add_TxN<cutlass::bfloat16_t, 17>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
cutlass::Quaternion<T> random_quaternion(int range) {
return cutlass::Quaternion<T>{
T((rand() % range * 2) - range),
T((rand() % range * 2) - range),
T((rand() % range * 2) - range),
T((rand() % range * 2) - range)
};
}
template <typename T>
void Functional_multiply_add_QuaternionT() {
using Element = cutlass::Quaternion<T>;
using Operator = cutlass::multiply_add<Element, Element, Element>;
using HostTensor = cutlass::HostTensor<Element, cutlass::layout::RowMajor>;
int const kM = 128;
int const kRange = 8;
HostTensor A({kM, 1});
HostTensor B({kM, 1});
HostTensor C({kM, 1});
HostTensor D({kM, 1});
srand(2021);
for (int m = 0; m < kM; ++m) {
A.at({m, 0}) = random_quaternion<T>(kRange);
B.at({m, 0}) = random_quaternion<T>(kRange);
C.at({m, 0}) = random_quaternion<T>(kRange);
}
A.sync_device();
B.sync_device();
C.sync_device();
D.sync_device();
test::core::kernel::trinary_operator<Element, Operator><<< dim3(kM,1), dim3(1,1) >>>(
D.device_data(),
A.device_data(),
B.device_data(),
C.device_data()
);
D.sync_host();
for (int m = 0; m < kM; ++m) {
Element a = A.at({m, 0});
Element b = B.at({m, 0});
Element c = C.at({m, 0});
Element got = D.at({m, 0});
Element expected = a * b + c;
EXPECT_TRUE(got == expected);
}
}
TEST(Functional, multiply_add_quaternion_f32) {
Functional_multiply_add_QuaternionT<float>();
}
namespace cutlass_test {
__global__ void
test_cutlass_maximum(cutlass::half_t const* in1, cutlass::half_t const* in2, cutlass::half_t* out)
{
{
constexpr bool propagate_NaN = true;
cutlass::maximum<cutlass::half_t, propagate_NaN> op;
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0
&& blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0) {
*out = op(*in1, *in2);
}
}
{
constexpr bool propagate_NaN = false;
cutlass::maximum<cutlass::half_t, propagate_NaN> op;
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0
&& blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0) {
*out = op(*in1, *in2);
}
}
}
} // cutlass_test
// Test compilation on both host and device.
TEST(Functional, maximum_half_host_propagate_NaN) {
constexpr bool propagate_NaN = true;
cutlass::maximum<cutlass::half_t, propagate_NaN> op;
cutlass::half_t x(1.0f);
cutlass::half_t y(2.0f);
auto result = op(x, y);
static_assert(std::is_same_v<decltype(result), cutlass::half_t>);
EXPECT_EQ(result, y);
result = op(y, x);
EXPECT_EQ(result, y);
}
TEST(Functional, maximum_half_host_dont_propagate_NaN) {
constexpr bool propagate_NaN = false;
cutlass::maximum<cutlass::half_t, propagate_NaN> op;
cutlass::half_t x(1.0f);
cutlass::half_t y(2.0f);
auto result = op(x, y);
static_assert(std::is_same_v<decltype(result), cutlass::half_t>);
EXPECT_EQ(result, y);
result = op(y, x);
EXPECT_EQ(result, y);
}
TEST(Functional, maximum_half_device) {
using Tensor = cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor>;
Tensor in1({1, 1});
Tensor in2({1, 1});
Tensor out({1, 1});
in1.host_data()[0] = cutlass::half_t(1.0f);
in2.host_data()[0] = cutlass::half_t(2.0f);
out.host_data()[0] = cutlass::half_t(0.0f);
in1.sync_device();
in2.sync_device();
out.sync_device();
cutlass_test::test_cutlass_maximum<<< 1, 1 >>>(
in1.device_data(),
in2.device_data(),
out.device_data()
);
out.sync_host();
EXPECT_EQ(out.host_data()[0], 2.0f);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/core/functional.cu/0 | {
"file_path": "test/unit/core/functional.cu",
"repo_id": "test",
"token_count": 5801
} | 44 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cute/tensor.hpp>
#include "../cooperative_gemm_common.hpp"
using namespace cute;
TEST(SM80_CuTe_Ampere, CooperativeGemm1_Half_MMA) {
using value_type = cutlass::half_t;
constexpr uint32_t m = 64;
constexpr uint32_t n = 64;
constexpr uint32_t k = 64;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<SM80_16x8x8_F16F16F16F16_TN>,
Layout<Shape<_2, _2, _1>>
>;
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>();
}
TEST(SM80_CuTe_Ampere, CooperativeGemm2_Double_MMA) {
using value_type = double;
constexpr uint32_t m = 64;
constexpr uint32_t n = 64;
constexpr uint32_t k = 64;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<SM80_8x8x4_F64F64F64F64_TN>,
Layout<Shape<_2,_2,_1>>
>;
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>();
}
TEST(SM80_CuTe_Ampere, CooperativeGemm3_Half_MMA_CustomSmemLayouts) {
using value_type = cutlass::half_t;
constexpr uint32_t m = 128;
constexpr uint32_t n = 128;
constexpr uint32_t k = 128;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<SM80_16x8x16_F16F16F16F16_TN>,
Layout<Shape<_2, _2, _1>>, // 2x2x1 thread group
Tile<_32, _32, _16> // 32x32x16 MMA for LDSM, 1x2x1 value group
>;
using smem_a_atom_layout_t = Layout<Shape<_64, _8>, Stride< _1,_64>>;
using smem_b_atom_layout_t = Layout<Shape< _8,_32>, Stride<_32, _1>>;
using smem_c_atom_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{})));
test_cooperative_gemm_col_major_layout<smem_a_atom_layout_t,
smem_b_atom_layout_t,
smem_c_atom_layout_t,
m,
n,
k,
thread_block_size,
tiled_mma_t,
128,
value_type,
value_type,
value_type>();
}
TEST(SM80_CuTe_Ampere, CooperativeGemm4_Half_MMA_SwizzledSmemLayouts) {
using value_type = cutlass::half_t;
constexpr uint32_t m = 128;
constexpr uint32_t n = 128;
constexpr uint32_t k = 128;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<SM80_16x8x16_F16F16F16F16_TN>,
Layout<Shape<_2, _2, _1>>, // 2x2x1 thread group
Tile<_32, _32, _16> // 32x32x16 MMA for LDSM, 1x2x1 value group
>;
// RowMajor
using smem_rowmajor_atom_layout_t = decltype(
composition(Swizzle<3,3,3>{},
Layout<Shape < _8,_64>,
Stride<_64, _1>>{}));
// ColMajor
using smem_colmajor_atom_layout_t = decltype(
composition(Swizzle<3,3,3>{},
Layout<Shape <_64, _8>,
Stride< _1,_64>>{}));
using smem_a_atom_layout_t = smem_rowmajor_atom_layout_t;
using smem_b_atom_layout_t = smem_colmajor_atom_layout_t;
using smem_c_atom_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{}), GenRowMajor{}));
using gmem_a_layout_t = decltype(make_layout(make_shape(Int<m> {}, Int<k> {}), GenRowMajor{}));
using gmem_b_layout_t = decltype(make_layout(make_shape(Int<n> {}, Int<k> {}), GenColMajor{}));
using gmem_c_layout_t = decltype(make_layout(make_shape(Int<m> {}, Int<n> {}), GenRowMajor{}));
using smem_a_layout_t = decltype(tile_to_shape(
smem_a_atom_layout_t{},
make_shape(shape<0>(gmem_a_layout_t{}), shape<1>(gmem_a_layout_t{})))
);
using smem_b_layout_t = decltype(tile_to_shape(
smem_b_atom_layout_t{},
make_shape(shape<0>(gmem_b_layout_t{}), shape<1>(gmem_b_layout_t{})))
);
using smem_c_layout_t = decltype(tile_to_shape(
smem_c_atom_layout_t{},
make_shape(shape<0>(gmem_c_layout_t{}), shape<1>(gmem_c_layout_t{})))
);
test_cooperative_gemm<gmem_a_layout_t,
gmem_b_layout_t,
gmem_c_layout_t,
smem_a_layout_t,
smem_b_layout_t,
smem_c_layout_t,
SM75_U32x4_LDSM_N, // A
SM75_U16x8_LDSM_T, // B
AutoVectorizingCopyWithAssumedAlignment<128>, // C
thread_block_size,
tiled_mma_t,
128,
value_type,
value_type,
value_type>();
}
TEST(SM80_CuTe_Ampere, CooperativeGemm5_Double_MMA_SwizzledSmemLayouts) {
using value_type = double;
constexpr uint32_t m = 128;
constexpr uint32_t n = 64;
constexpr uint32_t k = 16;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<MMA_Atom<SM80_8x8x4_F64F64F64F64_TN>, // Atom
Layout<Shape<_2, _2, _1>>, // Atom layout
Tile<Layout<Shape<_16, _2>, Stride<_2, _1>>, // 32x32x4 MMA with perm for load vectorization
Layout<Shape<_16, _2>, Stride<_2, _1>>,
Underscore>>;
using smem_a_atom_layout_t = decltype(
composition(Swizzle<2,2,2>{},
Layout<Shape <_16, _4>,
Stride< _1,_16>>{})); // M, K
using smem_b_atom_layout_t = decltype(
composition(Swizzle<2,2,2>{},
Layout<Shape <_16, _4>,
Stride< _1,_16>>{})); // N, K
using smem_c_atom_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{}), GenRowMajor{}));
using gmem_a_layout_t = decltype(make_layout(make_shape(Int<m> {}, Int<k> {}), GenRowMajor{}));
using gmem_b_layout_t = decltype(make_layout(make_shape(Int<n> {}, Int<k> {}), GenColMajor{}));
using gmem_c_layout_t = decltype(make_layout(make_shape(Int<m> {}, Int<n> {}), GenRowMajor{}));
using smem_a_layout_t = decltype(tile_to_shape(
smem_a_atom_layout_t{},
make_shape(shape<0>(gmem_a_layout_t{}), shape<1>(gmem_a_layout_t{})))
);
using smem_b_layout_t = decltype(tile_to_shape(
smem_b_atom_layout_t{},
make_shape(shape<0>(gmem_b_layout_t{}), shape<1>(gmem_b_layout_t{})))
);
using smem_c_layout_t = decltype(tile_to_shape(
smem_c_atom_layout_t{},
make_shape(shape<0>(gmem_c_layout_t{}), shape<1>(gmem_c_layout_t{})))
);
test_cooperative_gemm<gmem_a_layout_t,
gmem_b_layout_t,
gmem_c_layout_t,
smem_a_layout_t,
smem_b_layout_t,
smem_c_layout_t,
AutoVectorizingCopyWithAssumedAlignment<128>, // A
AutoVectorizingCopyWithAssumedAlignment<128>, // B
AutoVectorizingCopyWithAssumedAlignment<128>, // C
thread_block_size,
tiled_mma_t,
128,
value_type,
value_type,
value_type>();
}
TEST(SM80_CuTe_Ampere, CooperativeGemm6_MixedPrecisionFP16FP32_MMA) {
using TA = cutlass::half_t;
using TB = cutlass::half_t;
using TC = float;
constexpr uint32_t m = 64;
constexpr uint32_t n = 64;
constexpr uint32_t k = 64;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<SM80_16x8x8_F32F16F16F32_TN>,
Layout<Shape<_2, _2, _1>>
>;
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, 128, TA, TB, TC>();
}
TEST(SM80_CuTe_Ampere, CooperativeGemm7_MixedPrecisionBF16FP32_MMA) {
using TA = cutlass::bfloat16_t;
using TB = cutlass::bfloat16_t;
using TC = float;
constexpr uint32_t m = 64;
constexpr uint32_t n = 64;
constexpr uint32_t k = 64;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<SM80_16x8x8_F32BF16BF16F32_TN>,
Layout<Shape<_2, _2, _1>>
>;
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, 128, TA, TB, TC>();
}
TEST(SM80_CuTe_Ampere, CooperativeGemm8_MixedPrecisionTF32FP32_MMA) {
using TA = cutlass::tfloat32_t;
using TB = cutlass::tfloat32_t;
using TC = float;
constexpr uint32_t m = 64;
constexpr uint32_t n = 64;
constexpr uint32_t k = 64;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<SM80_16x8x8_F32TF32TF32F32_TN>,
Layout<Shape<_2, _2, _1>>
>;
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, 128, TA, TB, TC>();
}
TEST(SM80_CuTe_Ampere, CooperativeGemm9_C64C64C64_MMA) {
using TA = cutlass::complex<double>;
using TB = cutlass::complex<double>;
using TC = cutlass::complex<double>;
constexpr uint32_t thread_block_size = 256;
constexpr int MaxVecBits = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<SM80_8x8x4_C64C64C64C64_TN>,
Layout<Shape<_4, _4, _1>, Stride<_1, _4, _0>>,
Tile<Underscore, Underscore, Underscore>
>;
using ALayout = Layout<Shape<Int<13>,Int<35>>, Stride<Int<44>, Int<1> >>;
using BLayout = Layout<Shape< Int<7>, Int<35>>, Stride<Int<44>, Int<1> >>;
using CLayout = Layout<Shape<Int<13>, Int<7>>, Stride< Int<1>, Int<30>>>;
test_cooperative_gemm<ALayout,
BLayout,
CLayout,
ALayout,
BLayout,
CLayout,
AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, // A
AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, // B
AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, // C
thread_block_size,
tiled_mma_t,
MaxVecBits,
TA,
TB,
TC>();
}
TEST(SM80_CuTe_Ampere, CooperativeGemm10_F16F64F16_FMA) {
using TA = cutlass::half_t;
using TB = double;
using TC = cutlass::half_t;
constexpr uint32_t thread_block_size = 256;
constexpr int MaxVecBits = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<UniversalFMA<half_t, half_t, double, half_t>>,
Layout<Shape<_16, _16, _1>, Stride<_1, _16, _0>>,
Tile<Underscore, Underscore, Underscore>
>;
using ALayout = Layout<Shape<Int<64>,Int<64>>, Stride<Int<64>, Int< 1>>>;
using BLayout = Layout<Shape<Int<64>,Int<64>>, Stride<Int< 1>, Int<64>>>;
using CLayout = Layout<Shape<Int<64>,Int<64>>, Stride<Int< 1>, Int<64>>>;
test_cooperative_gemm<ALayout,
BLayout,
CLayout,
ALayout,
BLayout,
CLayout,
AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, // A
AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, // B
AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, // C
thread_block_size,
tiled_mma_t,
MaxVecBits,
TA,
TB,
TC>();
}
TEST(SM80_CuTe_Ampere, CooperativeGemmComposedStride) {
using T = cute::half_t;
constexpr uint32_t thread_block_size = 128;
constexpr int MaxVecBits = 16;
using tiled_mma_t =
TiledMMA<
MMA_Atom<SM80_16x8x16_F16F16F16F16_TN>,
Layout<Shape<_2, _2, _1>, Stride<_1, _2, _0>>,
Tile<Underscore, Underscore, Underscore>
>;
using swizzle = cute::Swizzle<3, 3, 3>;
using offset = cute::_0;
using atom_tile_right = decltype(cute::make_layout(cute::Shape<cute::_8, cute::_64>{}, cute::LayoutRight{}));
using FP16AtomLayoutRight = decltype(cute::composition(swizzle{}, offset{}, atom_tile_right{}));
using shape = cute::Shape<cute::Int<128>, cute::Int<128>>;
using global_a_layout = decltype(cute::make_layout(shape{}, cute::LayoutRight{}));
using global_b_layout = decltype(cute::make_layout(shape{}, cute::LayoutLeft{}));
using global_c_layout = decltype(cute::make_layout(shape{}, cute::LayoutRight{}));
// This is for A row major, B col major according to CUTLASS default configs
using ALayout = decltype(cute::tile_to_shape(FP16AtomLayoutRight{}, global_a_layout{}));
using BLayout = decltype(cute::tile_to_shape(FP16AtomLayoutRight{}, global_b_layout{}));
using CLayout = global_c_layout;
test_cooperative_gemm<ALayout,
BLayout,
CLayout,
ALayout,
BLayout,
CLayout,
AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, // A
AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, // B
AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, // C
thread_block_size,
tiled_mma_t,
MaxVecBits,
T,
T,
T>();
}
TEST(SM89_CuTe_Ampere, CooperativeGemm8_MixedPrecisionTF32FP32_Transform) {
using TA = cutlass::tfloat32_t;
using TB = cutlass::tfloat32_t;
using TC = float;
constexpr uint32_t m = 9;
constexpr uint32_t n = 9;
constexpr uint32_t k = 9;
constexpr uint32_t thread_block_size = 64;
using tiled_mma_t =
TiledMMA<
MMA_Atom<SM80_16x8x8_F32TF32TF32F32_TN>,
Layout<Shape<_1, _2, _1>>
>;
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, 16, TA, TB, TC>(cute::negate{}, cute::negate{}, cute::negate{}, cute::negate{});
}
| test/unit/cute/ampere/cooperative_gemm.cu/0 | {
"file_path": "test/unit/cute/ampere/cooperative_gemm.cu",
"repo_id": "test",
"token_count": 8231
} | 45 |