/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Base functionality for common types of universal GEMM kernel parameters
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/trace.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace util {
template <class LayoutA, class LayoutB>
CUTLASS_HOST_DEVICE
static bool
is_continous_k_aligned(GemmCoord problem_size, size_t alignmentA, size_t alignmentB) {
return (platform::is_same<LayoutA, layout::RowMajor>::value && (problem_size.k() % alignmentA) == 0) ||
(platform::is_same<LayoutB, layout::ColumnMajor>::value && (problem_size.k() % alignmentB) == 0);
}
} // namespace util
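// Illustrative usage sketch (not part of the original header). The problem
// size and alignments below are hypothetical and chosen only to show the call
// shape: for a row-major A operand, the helper reports whether K is a
// multiple of A's cache-line alignment expressed in elements.
//
//   GemmCoord problem_size(1024, 512, 4096);
//   // 128-byte cache line / 2-byte half_t elements = 64-element alignment
//   bool aligned = util::is_continous_k_aligned<layout::RowMajor, layout::ColumnMajor>(
//       problem_size, /*alignmentA=*/64, /*alignmentB=*/64);   // true: 4096 % 64 == 0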
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Argument structure
struct UniversalArgumentsBase
{
//
// Data members
//
GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm;
GemmCoord problem_size{};
int batch_count{1};
int64_t batch_stride_D{0};
//
// Methods
//
UniversalArgumentsBase() = default;
/// constructs an arguments structure
UniversalArgumentsBase(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
int64_t batch_stride_D)
:
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
batch_stride_D(batch_stride_D)
{
CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
}
};
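// Illustrative sketch (not part of the original header): constructing the base
// arguments for a plain, non-batched GEMM. The problem size and batch stride
// below are hypothetical values.
//
//   UniversalArgumentsBase args(
//       GemmUniversalMode::kGemm,
//       GemmCoord(1024, 512, 4096),    // problem size (M, N, K)
//       /*batch_count=*/1,             // reinterpreted as split-K slices in kGemm mode
//       /*batch_stride_D=*/1024 * 512);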
/// Parameters structure
template <
typename ThreadblockSwizzle,
typename ThreadblockShape,
typename ElementA,
typename ElementB,
typename ElementC,
typename LayoutA,
typename LayoutB>
struct UniversalParamsBase
{
//
// Data members
//
GemmCoord problem_size{};
GemmCoord grid_tiled_shape{};
int swizzle_log_tile{0};
GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm;
int batch_count {0};
int gemm_k_size {0};
int64_t batch_stride_D {0};
int *semaphore = nullptr;
//
// Host dispatch API
//
/// Default constructor
UniversalParamsBase() = default;
/// Constructor
UniversalParamsBase(
UniversalArgumentsBase const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
problem_size(args.problem_size),
mode(args.mode),
batch_count(args.batch_count),
batch_stride_D(args.batch_stride_D),
semaphore(nullptr)
{
init_grid_tiled_shape();
}
/// Returns the workspace size (in bytes) needed for this problem geometry
size_t get_workspace_size() const
{
size_t workspace_bytes = 0;
if (mode == GemmUniversalMode::kGemmSplitKParallel)
{
// Split-K parallel always requires a temporary workspace
workspace_bytes =
sizeof(ElementC) *
size_t(batch_stride_D) *
size_t(grid_tiled_shape.k());
}
else if (mode == GemmUniversalMode::kGemm && grid_tiled_shape.k() > 1)
{
// Serial split-K only requires a temporary workspace if the number of partitions along the
// GEMM K dimension is greater than one.
workspace_bytes = sizeof(int) * size_t(grid_tiled_shape.m()) * size_t(grid_tiled_shape.n());
}
return workspace_bytes;
}
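  // Worked example (illustrative, hypothetical numbers): for split-K parallel
  // with ElementC = float, batch_stride_D = 1024 * 512 and grid_tiled_shape.k() = 4,
  // the workspace holds the partial results of every K-partition:
  //   4 bytes * 524288 * 4 = 8388608 bytes (8 MiB).
  // For serial split-K (kGemm) with grid_tiled_shape.k() > 1, only one int
  // semaphore per output tile is needed, e.g. 4 bytes * 8 * 4 = 128 bytes for
  // an 8 x 4 grid of threadblock tiles.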
/// Assign and initialize the specified workspace buffer. Assumes
/// the memory allocated to workspace is at least as large as get_workspace_size().
Status init_workspace(
void *workspace,
cudaStream_t stream = nullptr)
{
semaphore = static_cast<int *>(workspace);
// Zero-initialize entire workspace
if (semaphore)
{
size_t workspace_bytes = get_workspace_size();
CUTLASS_TRACE_HOST(" Initialize " << workspace_bytes << " workspace bytes");
cudaError_t result = cudaMemsetAsync(
semaphore,
0,
workspace_bytes,
stream);
if (result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
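  // Illustrative host-side flow (a sketch, not part of the original header).
  // Allocating the workspace is the caller's responsibility; the variable
  // names below are hypothetical.
  //
  //   size_t bytes = params.get_workspace_size();
  //   void *workspace = nullptr;
  //   if (bytes) {
  //     cudaMalloc(&workspace, bytes);
  //   }
  //   Status status = params.init_workspace(workspace, stream);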
/// Returns the GEMM volume in thread block tiles
GemmCoord get_tiled_shape() const
{
return grid_tiled_shape;
}
/// Returns the total number of thread blocks to launch
int get_grid_blocks() const
{
dim3 grid_dims = get_grid_dims();
return grid_dims.x * grid_dims.y * grid_dims.z;
}
/// Returns the grid extents in thread blocks to launch
dim3 get_grid_dims() const
{
return ThreadblockSwizzle().get_grid_shape(grid_tiled_shape);
}
private:
CUTLASS_HOST_DEVICE
void init_grid_tiled_shape() {
// Get GEMM volume in thread block tiles
grid_tiled_shape = ThreadblockSwizzle::get_tiled_shape(
problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
batch_count);
swizzle_log_tile = ThreadblockSwizzle::get_log_tile(grid_tiled_shape);
// Determine extent of K-dimension assigned to each block
gemm_k_size = problem_size.k();
if (mode == GemmUniversalMode::kGemm || mode == GemmUniversalMode::kGemmSplitKParallel)
{
static const uint32_t CACHELINE_BYTES = 128;
static const size_t element_bytes_a = sizeof(ElementA);
static const size_t element_bytes_b = sizeof(ElementB);
static const size_t cacheline_elements_a = CACHELINE_BYTES / element_bytes_a;
static const size_t cacheline_elements_b = CACHELINE_BYTES / element_bytes_b;
const bool cacheline_alignment_needed =
util::is_continous_k_aligned<LayoutA, LayoutB>(problem_size, cacheline_elements_a, cacheline_elements_b);
int const kAlignK = const_max(
const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value),
cacheline_alignment_needed ? const_max(cacheline_elements_a, cacheline_elements_b) : 1);
gemm_k_size = round_up(ceil_div(problem_size.k(), batch_count), kAlignK);
if (gemm_k_size) {
grid_tiled_shape.k() = ceil_div(problem_size.k(), gemm_k_size);
}
}
}
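  // Worked example (illustrative, hypothetical values): for half-precision A/B,
  // K = 4096 and 3 split-K slices with cache-line alignment required,
  //   kAlignK     = max(max(128/16, 128/16), max(64, 64)) = 64
  //   gemm_k_size = round_up(ceil_div(4096, 3), 64) = round_up(1366, 64) = 1408
  //   grid_tiled_shape.k() = ceil_div(4096, 1408) = 3
  // so each K-partition covers up to 1408 K indices and starts at a
  // cache-line-aligned K offset.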
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
// End of file: include/cutlass/gemm/kernel/params_universal_base.h
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/fast_math.h"
#include "cutlass/gemm_coord.hpp"
#include "cutlass/kernel_hardware_info.hpp"
#include "cutlass/gemm/kernel/tile_scheduler_params.h"
#include "cute/layout.hpp"
#include "cute/tensor.hpp"
#include "cute/arch/cluster_sm90.hpp"
namespace cutlass::gemm::kernel::detail {
///////////////////////////////////////////////////////////////////////////////
// Persistent Thread Block (TB) scheduler
template <class GroupProblemShape>
class PersistentTileSchedulerSm90Group {
//
// Data members
//
private:
uint64_t current_work_linear_idx_ = 0;
uint64_t total_grid_size_ = 0;
// Tracking current group, its starting linear idx and total tiles
struct GroupInfo {
int group_idx = 0;
uint64_t start_linear_idx = 0;
uint64_t total_tiles = 0;
} current_group_info_;
public:
struct WorkTileInfo {
int32_t M_idx = 0;
int32_t N_idx = 0;
int32_t L_idx = 0;
bool is_valid_tile = false;
CUTLASS_HOST_DEVICE
bool
is_valid() const {
return is_valid_tile;
}
CUTLASS_HOST_DEVICE
static WorkTileInfo
invalid_work_tile() {
return {-1, -1, -1, false};
}
CUTLASS_HOST_DEVICE
bool
is_final_split(uint32_t k_tiles_per_output_tile) const {
return true;
}
CUTLASS_HOST_DEVICE
int32_t
reduction_subtile_idx() const {
return -1;
}
};
using ProblemShape = typename GroupProblemShape::UnderlyingProblemShape;
using Params = PersistentTileSchedulerSm90GroupParams<ProblemShape>;
using RasterOrder = typename Params::RasterOrder;
using RasterOrderOptions = typename Params::RasterOrderOptions;
struct Arguments {
int max_swizzle_size = 1;
// Not applying Heuristics for Grouped problems, since largest dimension can change per group
RasterOrderOptions raster_order = RasterOrderOptions::AlongM;
};
// Sink scheduler params as a member
Params scheduler_params;
//
// Methods
//
template <class TileShape, class ClusterShape>
static Params
to_underlying_arguments(
GroupProblemShape problem_shapes,
TileShape tile_shape,
ClusterShape cluster_shape,
KernelHardwareInfo const& hw_info,
Arguments const& arguments,
[[maybe_unused]] void* workspace=nullptr,
[[maybe_unused]] const uint32_t epilogue_subtile = 1) {
// We only need the tile and cluster shape during scheduler setup, so let FTAD do the magic
static_assert(cute::is_static<TileShape>::value);
static_assert(cute::is_static<ClusterShape>::value);
dim3 problem_blocks = get_tiled_cta_shape_mnl(
problem_shapes.groups(),
problem_shapes,
hw_info,
tile_shape, cluster_shape);
Params params;
params.initialize(
problem_blocks,
problem_shapes.groups(),
problem_shapes.problem_shapes,
problem_shapes.host_problem_shapes,
to_gemm_coord(tile_shape),
to_gemm_coord(cluster_shape),
hw_info,
arguments.max_swizzle_size,
arguments.raster_order
);
return params;
}
// Given the inputs, computes the physical grid we should launch.
template<class TileShape, class ClusterShape>
CUTLASS_HOST_DEVICE static
dim3
get_grid_shape(
GroupProblemShape problem_shapes,
TileShape tile_shape,
ClusterShape cluster_shape,
KernelHardwareInfo hw_info,
Arguments arguments,
bool truncate_by_problem_size=true) {
dim3 problem_blocks = get_tiled_cta_shape_mnl(
problem_shapes.groups(),
problem_shapes,
hw_info,
tile_shape, cluster_shape);
return Params::get_grid_shape(
problem_blocks,
to_gemm_coord(cluster_shape),
hw_info,
arguments.max_swizzle_size,
arguments.raster_order,
/* truncate_by_problem_size = */true
);
}
// Given the inputs, computes the total number of output blocks this problem will compute over
// Note that this is only the logical size of our grid, not the physical grid we will actually launch.
template<class BlockShape, class ClusterShape>
CUTLASS_HOST_DEVICE static
dim3
get_tiled_cta_shape_mnl(int groups, GroupProblemShape problem_shapes, KernelHardwareInfo hw_info, BlockShape cta_shape, ClusterShape cluster_shape) {
uint32_t total_ctas = 0;
uint32_t cta_in_N_dim = 1; // We linearize the blocks across all the problems here
// If host problem shapes are not provided.
if (!problem_shapes.is_host_problem_shape_available()) {
total_ctas = hw_info.sm_count;
}
    // If host problem shapes are provided, we can make a better decision about whether a smaller grid can be launched.
else {
for (int group = 0; group < groups; group++) {
auto ctas_along_m = cute::size(cute::ceil_div(cute::shape<0>(problem_shapes.get_host_problem_shape(group)), cute::shape<0>(cta_shape)));
auto ctas_along_n = cute::size(cute::ceil_div(cute::shape<1>(problem_shapes.get_host_problem_shape(group)), cute::shape<1>(cta_shape)));
auto problem_blocks_m = round_up(ctas_along_m, cute::get<0>(cluster_shape));
auto problem_blocks_n = round_up(ctas_along_n, cute::get<1>(cluster_shape));
total_ctas += problem_blocks_m * problem_blocks_n;
}
}
return Params::get_tiled_cta_shape_mnl(
to_gemm_coord(cluster_shape),
total_ctas, cta_in_N_dim
);
}
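  // Worked example (illustrative, hypothetical shapes): with a 128x128 CTA tile
  // and a 2x1 cluster, a group of shape (512, 384, K) contributes
  // round_up(4, 2) * round_up(3, 1) = 12 CTAs, and a group of shape (256, 256, K)
  // contributes 2 * 2 = 4 CTAs, for total_ctas = 16. When host problem shapes
  // are unavailable, the grid falls back to hw_info.sm_count CTAs.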
static bool
can_implement(Arguments const& args) {
return true;
}
PersistentTileSchedulerSm90Group() = default;
CUTLASS_DEVICE explicit PersistentTileSchedulerSm90Group(Params const& params_) : scheduler_params(params_) {
// MSVC requires protecting use of CUDA-specific nonstandard syntax,
// like blockIdx and gridDim, with __CUDA_ARCH__.
#if defined(__CUDA_ARCH__)
if (scheduler_params.raster_order_ == RasterOrder::AlongN) {
current_work_linear_idx_ = uint64_t(blockIdx.x) + uint64_t(blockIdx.y) * uint64_t(gridDim.x);
}
else {
current_work_linear_idx_ = uint64_t(blockIdx.x) * uint64_t(gridDim.y) + uint64_t(blockIdx.y);
}
total_grid_size_ = uint64_t(gridDim.x) * uint64_t(gridDim.y) * uint64_t(gridDim.z);
uint64_t ctas_along_m, ctas_along_n;
if (is_tuple<decltype(cute::shape<0>(params_.problem_shapes_[0]))>::value ||
is_tuple<decltype(cute::shape<1>(params_.problem_shapes_[0]))>::value) {
ctas_along_m = cute::size(cute::ceil_div(cute::shape<0>(params_.problem_shapes_[0]), scheduler_params.cta_shape_.m()));
ctas_along_n = cute::size(cute::ceil_div(cute::shape<1>(params_.problem_shapes_[0]), scheduler_params.cta_shape_.n()));
}
else {
ctas_along_m = scheduler_params.divmod_cta_shape_m_.divide(cute::shape<0>(params_.problem_shapes_[0]) + scheduler_params.divmod_cta_shape_m_.divisor - 1);
ctas_along_n = scheduler_params.divmod_cta_shape_n_.divide(cute::shape<1>(params_.problem_shapes_[0]) + scheduler_params.divmod_cta_shape_n_.divisor - 1);
}
auto problem_blocks_m = round_up(ctas_along_m, (1 << params_.log_swizzle_size_) * params_.cluster_shape_.m());
auto problem_blocks_n = round_up(ctas_along_n, (1 << params_.log_swizzle_size_) * params_.cluster_shape_.n());
current_group_info_.total_tiles = problem_blocks_m * problem_blocks_n;
#else
CUTLASS_ASSERT(false && "This line should never be reached");
#endif
}
CUTLASS_DEVICE
WorkTileInfo
get_current_work() {
return get_current_work_for_linear_idx(current_work_linear_idx_);
}
CUTLASS_DEVICE
WorkTileInfo
get_current_work_for_linear_idx(uint64_t linear_idx) {
if (scheduler_params.pre_processed_problem_shapes && linear_idx >= scheduler_params.blocks_across_problem_) {
return WorkTileInfo::invalid_work_tile();
}
return get_work_idx_m_and_n(linear_idx,
current_group_info_,
scheduler_params.groups_,
scheduler_params.problem_shapes_,
scheduler_params.cta_shape_,
scheduler_params.cluster_shape_,
scheduler_params.divmod_cluster_shape_major_,
scheduler_params.divmod_cluster_shape_minor_,
scheduler_params.divmod_cta_shape_m_,
scheduler_params.divmod_cta_shape_n_,
scheduler_params.log_swizzle_size_,
scheduler_params.raster_order_);
}
CUTLASS_DEVICE
void
advance_to_next_work(uint32_t advance_count = 1) {
current_work_linear_idx_ += total_grid_size_ * uint64_t(advance_count);
}
// get work_idx_m, work_idx_n from linear_idx while applying swizzle
static CUTLASS_DEVICE
WorkTileInfo
get_work_idx_m_and_n(
uint64_t linear_idx,
struct GroupInfo& group_info,
int32_t total_problem_groups,
ProblemShape* problem_shapes,
GemmCoord cta_shape,
GemmCoord cluster_shape,
FastDivmodU64Pow2 const& divmod_cluster_shape_major,
FastDivmodU64Pow2 const& divmod_cluster_shape_minor,
FastDivmodU64 const& divmod_cta_shape_m,
FastDivmodU64 const& divmod_cta_shape_n,
int32_t log_swizzle_size,
RasterOrder raster_order) {
bool valid_tile = true;
uint64_t ctas_along_m, ctas_along_n;
if (is_tuple<decltype(cute::shape<0>(problem_shapes[group_info.group_idx]))>::value ||
is_tuple<decltype(cute::shape<1>(problem_shapes[group_info.group_idx]))>::value) {
ctas_along_m = cute::size(cute::ceil_div(cute::shape<0>(problem_shapes[group_info.group_idx]), cta_shape.m()));
ctas_along_n = cute::size(cute::ceil_div(cute::shape<1>(problem_shapes[group_info.group_idx]), cta_shape.n()));
}
else {
ctas_along_m = divmod_cta_shape_m.divide(cute::shape<0>(problem_shapes[group_info.group_idx]) + divmod_cta_shape_m.divisor - 1);
ctas_along_n = divmod_cta_shape_n.divide(cute::shape<1>(problem_shapes[group_info.group_idx]) + divmod_cta_shape_n.divisor - 1);
}
auto problem_blocks_m = round_up(ctas_along_m, (1 << log_swizzle_size) * cluster_shape.m());
auto problem_blocks_n = round_up(ctas_along_n, (1 << log_swizzle_size) * cluster_shape.n());
group_info.total_tiles = problem_blocks_m * problem_blocks_n;
while (group_info.start_linear_idx + group_info.total_tiles <= linear_idx) {
group_info.group_idx++;
if (group_info.group_idx >= total_problem_groups)
return WorkTileInfo::invalid_work_tile();
group_info.start_linear_idx += group_info.total_tiles;
if (is_tuple<decltype(cute::shape<0>(problem_shapes[group_info.group_idx]))>::value ||
is_tuple<decltype(cute::shape<1>(problem_shapes[group_info.group_idx]))>::value) {
ctas_along_m = cute::size(cute::ceil_div(cute::shape<0>(problem_shapes[group_info.group_idx]), cta_shape.m()));
ctas_along_n = cute::size(cute::ceil_div(cute::shape<1>(problem_shapes[group_info.group_idx]), cta_shape.n()));
}
else {
ctas_along_m = divmod_cta_shape_m.divide(cute::shape<0>(problem_shapes[group_info.group_idx]) + divmod_cta_shape_m.divisor - 1);
ctas_along_n = divmod_cta_shape_n.divide(cute::shape<1>(problem_shapes[group_info.group_idx]) + divmod_cta_shape_n.divisor - 1);
}
problem_blocks_m = round_up(ctas_along_m, (1 << log_swizzle_size) * cluster_shape.m());
problem_blocks_n = round_up(ctas_along_n, (1 << log_swizzle_size) * cluster_shape.n());
group_info.total_tiles = problem_blocks_m * problem_blocks_n;
}
uint64_t cluster_id, cluster_major_offset = 0, cluster_minor_offset = 0;
uint64_t blk_per_grid_dim = divmod_cluster_shape_minor.divide(linear_idx - group_info.start_linear_idx);
divmod_cluster_shape_major(cluster_id, cluster_major_offset, blk_per_grid_dim);
auto [cta_m_in_cluster, cta_n_in_cluster, _] = cute::block_id_in_cluster();
if (raster_order == RasterOrder::AlongN) {
cluster_minor_offset = cta_m_in_cluster;
}
else {
cluster_minor_offset = cta_n_in_cluster;
}
uint64_t cluster_idx_minor, cluster_idx_major;
uint64_t cluster_idx_minor_div_swizzle, extra, offset;
offset = cluster_id & ((1 << log_swizzle_size) - 1);
extra = cluster_id >> log_swizzle_size;
uint64_t curr_group_cluster_blk_major;
if (raster_order == RasterOrder::AlongN) {
curr_group_cluster_blk_major = divmod_cluster_shape_major.divide(problem_blocks_n);
}
else {
curr_group_cluster_blk_major = divmod_cluster_shape_major.divide(problem_blocks_m);
}
cluster_idx_minor_div_swizzle = extra / curr_group_cluster_blk_major;
cluster_idx_major = extra % curr_group_cluster_blk_major;
cluster_idx_minor = cluster_idx_minor_div_swizzle * (1 << log_swizzle_size) + offset;
auto minor_work_idx = static_cast<int32_t>(cluster_idx_minor * divmod_cluster_shape_minor.divisor +
cluster_minor_offset);
auto major_work_idx = static_cast<int32_t>(cluster_idx_major * divmod_cluster_shape_major.divisor +
cluster_major_offset);
if (raster_order == RasterOrder::AlongN) {
return {minor_work_idx, major_work_idx, group_info.group_idx, valid_tile};
}
else {
return {major_work_idx, minor_work_idx, group_info.group_idx, valid_tile};
}
}
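  // Worked trace (illustrative, hypothetical values): raster order AlongN with a
  // 1x2 cluster (minor = M extent 1, major = N extent 2), log_swizzle_size = 0,
  // and a group with problem_blocks_m = problem_blocks_n = 4. For a CTA with
  // linear_idx - start_linear_idx = 5 and cta_m_in_cluster = 0:
  //   blk_per_grid_dim = 5 / 1 = 5
  //   cluster_id = 5 / 2 = 2, cluster_major_offset = 1
  //   curr_group_cluster_blk_major = 4 / 2 = 2
  //   cluster_idx_major = 2 % 2 = 0, cluster_idx_minor = 2 / 2 = 1
  //   -> work tile (M_idx, N_idx) = (1 * 1 + 0, 0 * 2 + 1) = (1, 1)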
// Returns whether the block assigned this work should compute the epilogue for the corresponding
// output tile. For the basic tile scheduler, this is always true.
CUTLASS_HOST_DEVICE
static bool
compute_epilogue(WorkTileInfo const&, Params const&) {
return true;
}
// Performs the reduction across splits for a given output tile. Since this scheduler does
// not split output tiles, no reduction is needed.
template <class FrgTensorC>
CUTLASS_DEVICE
static void
fixup(Params const&, WorkTileInfo const&, FrgTensorC&, uint32_t, uint32_t) {}
// Returns whether the current WorkTileInfo passed in should continue to be used. Since
// this scheduler only schedules work in units of single, full output tiles, the WorkTileInfo
// passed in should not be used after having been processed.
CUTLASS_DEVICE
static bool
continue_current_work(WorkTileInfo&) {
return false;
}
// The basic tile scheduler does not require any additional workspace
template <class ProblemShape, class ElementAccumulator>
static size_t
get_workspace_size(Arguments const&, ProblemShape, KernelHardwareInfo const&, uint32_t, const uint32_t = 1) {
return 0;
}
template <class ProblemShape, class ElementAccumulator>
static cutlass::Status
initialize_workspace(Arguments const&, void*, cudaStream_t, ProblemShape, KernelHardwareInfo const&,
uint32_t, const uint32_t = 1, CudaHostAdapter* cuda_adapter = nullptr) {
return Status::kSuccess;
}
template <class ProblemShape_MNKL, class TileShape>
CUTLASS_HOST_DEVICE
static int
get_work_k_tile_count(WorkTileInfo const& work_tile_info, ProblemShape_MNKL problem_shape, TileShape tile_shape) {
// All work units returned by this scheduler cover the entire K iteration
// space of the output tile assigned to the work unit.
return cute::size(cute::ceil_div(cute::get<2>(problem_shape), cute::get<2>(tile_shape)));
}
CUTLASS_HOST_DEVICE
static uint32_t
get_work_k_tile_start(WorkTileInfo const&) {
// All work units returned by this scheduler start from K tile 0
return 0u;
}
CUTLASS_DEVICE
static bool
need_separate_reduction(Params const& params) {
return false;
}
CUTLASS_DEVICE
bool
is_work_tile_for_reduction(WorkTileInfo const& work_tile_info, Params const& params) {
return false;
}
CUTLASS_DEVICE
uint32_t
epilgoue_subtile_idx(WorkTileInfo const& work_tile_info, Params const& params) const {
return 0;
}
template <class FrgTensorC>
CUTLASS_DEVICE
void
separate_reduction(
Params const& params,
WorkTileInfo const& work_tile_info,
FrgTensorC& accumulators,
uint32_t num_barriers,
uint32_t barrier_idx) {
}
// Shares the accumulator set with peers in the global workspace
template <class FrgTensorC>
CUTLASS_DEVICE
static void
share(
Params const& params,
WorkTileInfo const& work_tile_info,
FrgTensorC& accumulators,
uint32_t num_barriers,
uint32_t barrier_idx) {
}
CUTLASS_DEVICE
static bool
valid_warpgroup_in_work_tile(WorkTileInfo const& work_tile_info) {
return true;
}
CUTLASS_DEVICE
static bool
requires_separate_reduction(Params const& params) {
return false;
}
// Kernel helper function to get next work tile
CUTLASS_DEVICE
auto
fetch_next_work(WorkTileInfo work_tile_info) {
if (continue_current_work(work_tile_info)) {
return work_tile_info;
}
advance_to_next_work();
return get_current_work();
}
// Returns the initial work tile info that will be computed over
template <class ClusterShape>
CUTLASS_DEVICE
WorkTileInfo
initial_work_tile_info(ClusterShape) {
return get_current_work();
}
};
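// Illustrative kernel-side usage sketch (not part of the original header). A
// persistent kernel would typically iterate over work tiles as below; the
// surrounding kernel pieces are hypothetical.
//
//   PersistentTileSchedulerSm90Group<GroupProblemShape> scheduler(params);
//   auto work_tile_info = scheduler.initial_work_tile_info(ClusterShape{});
//   while (work_tile_info.is_valid()) {
//     // ... run mainloop and epilogue for (M_idx, N_idx, L_idx) ...
//     work_tile_info = scheduler.fetch_next_work(work_tile_info);
//   }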
} // namespace cutlass::gemm::kernel::detail
// End of file: include/cutlass/gemm/kernel/sm90_tile_scheduler_group.hpp
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/permute.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/gemm/threadblock/default_mma_core_wmma.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Number of stages used in the pipelined mainloop
int Stages,
    /// Operation performed by GEMM
typename Operator,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
/// Gather operand A by using an index array
bool GatherA = false,
/// Gather operand B by using an index array
bool GatherB = false,
/// Permute operand A
typename PermuteALayout = layout::NoPermute,
/// Permute operand B
typename PermuteBLayout = layout::NoPermute
>
struct DefaultMma;
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass Simt)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Layout type for C and D matrix operand
typename LayoutC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Permute operand A
typename PermuteALayout,
/// Permute operand B
typename PermuteBLayout
>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, false, SharedMemoryClearOption::kNone,
GatherA, GatherB, PermuteALayout, PermuteBLayout> {
static_assert(platform::is_same<LayoutC, layout::RowMajor>::value
|| platform::is_same<LayoutC, layout::AffineRankN<2>>::value,
"simt epilogue must be row major");
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, LayoutC,
arch::OpClassSimt, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA,
GatherA, PermuteALayout>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB,
GatherB, PermuteBLayout>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
LayoutC, typename MmaCore::MmaPolicy>;
};
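// Illustrative instantiation sketch (not part of the original header). The tile
// shapes and alignments below are hypothetical values typical of an SGEMM
// configuration; the resulting ThreadblockMma is the type consumed by the
// threadblock-scoped GEMM kernel.
//
//   using Mma = cutlass::gemm::threadblock::DefaultMma<
//       float, cutlass::layout::RowMajor, 1,            // A
//       float, cutlass::layout::ColumnMajor, 1,         // B
//       float, cutlass::layout::RowMajor,               // accumulator / C
//       cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
//       cutlass::gemm::GemmShape<128, 128, 8>,          // threadblock tile
//       cutlass::gemm::GemmShape<32, 64, 8>,            // warp tile
//       cutlass::gemm::GemmShape<1, 1, 1>,              // instruction tile
//       2, cutlass::arch::OpMultiplyAdd>;               // stages, operator
//   using ThreadblockMma = typename Mma::ThreadblockMma;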
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass TensorOp)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Permute operand A
typename PermuteALayout,
/// Permute operand B
typename PermuteBLayout
>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, false, SharedMemoryClear,
GatherA, GatherB, PermuteALayout, PermuteBLayout> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA,
GatherA, PermuteALayout>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB,
GatherB, PermuteBLayout>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
layout::RowMajor, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass TensorOp)
template <
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Permute operand A
typename PermuteALayout,
/// Permute operand B
typename PermuteBLayout
>
struct DefaultMma<float, LayoutA, kAlignmentA, float, LayoutB,
kAlignmentB, float, layout::RowMajor,
arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, false, SharedMemoryClearOption::kNone,
GatherA, GatherB, PermuteALayout, PermuteBLayout> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, float, LayoutA, float,
LayoutB, float, layout::RowMajor, arch::OpClassTensorOp, 2,
arch::OpMultiplyAddFastF16>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
float, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA,
GatherA, PermuteALayout>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
float, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB,
GatherB, PermuteBLayout>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, float,
layout::RowMajor, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for column-major-interleaved output
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
    /// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator,
/// Number of Interleaved K
int InterleavedK>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass,
ArchTag, ThreadblockShape, WarpShape, InstructionShape, 2,
Operator, true, SharedMemoryClearOption::kNone, false, false,
layout::NoPermute, layout::NoPermute> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, 2, Operator,
true>;
static_assert(kAlignmentA == 128 / sizeof_bits<ElementA>::value,
"Alignment must match thread data map's vector length");
  static_assert(kAlignmentB == 128 / sizeof_bits<ElementB>::value,
"Alignment must match thread data map's vector length");
// Define iterators over tiles from the A operand
using IteratorA = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>, ElementA,
LayoutA, 1, typename MmaCore::IteratorThreadMapA>;
// Define iterators over tiles from the B operand
using IteratorB = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>, ElementB,
LayoutB, 0, typename MmaCore::IteratorThreadMapB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>,
typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Layout type for C and D matrix operand
typename LayoutC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the multistage mainloop
int Stages,
    /// Operation performed by GEMM
typename Operator,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Permute operand A
typename PermuteALayout,
/// Permute operand B
typename PermuteBLayout
>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, Stages, Operator, false, SharedMemoryClearOption::kNone,
GatherA, GatherB, PermuteALayout, PermuteBLayout> {
static_assert(platform::is_same<LayoutC, layout::RowMajor>::value
|| platform::is_same<LayoutC, layout::AffineRankN<2>>::value,
"simt epilogue must be row major");
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, LayoutC, arch::OpClassSimt,
Stages, Operator>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
using IteratorA =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, ThreadMapA, AccessTypeA, GatherA, PermuteALayout>;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, ThreadMapB, AccessTypeB, GatherB, PermuteBLayout>;
// Define the threadblock-scoped multistage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB,
MmaCore::kCacheOpB, ElementAccumulator, LayoutC,
typename MmaCore::MmaPolicy, Stages>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass TensorOp)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Layout type for C and D matrix operand
typename LayoutC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the multistage mainloop
int Stages,
    /// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Permute operand A
typename PermuteALayout,
/// Permute operand B
typename PermuteBLayout
>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, Stages, Operator, false, SharedMemoryClear,
GatherA, GatherB, PermuteALayout, PermuteBLayout> {
static_assert(platform::is_same<LayoutC, layout::RowMajor>::value
|| platform::is_same<LayoutC, layout::AffineRankN<2>>::value,
"simt epilogue must be row major");
static cutlass::arch::CacheOperation::Kind const CacheOpA =
((sizeof_bits<ElementA>::value * kAlignmentA) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * kAlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
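  // Example of how the cache operation is selected (illustrative): for half_t
  // operands (16 bits) with kAlignment = 8, each access is 16 * 8 = 128 bits,
  // so cp.async can cache at the global (L2) level only (CacheOperation::Global);
  // with kAlignment = 4 the 64-bit accesses fall back to CacheOperation::Always.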
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, LayoutC, arch::OpClassTensorOp,
Stages, Operator, false, CacheOpA, CacheOpB>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
using IteratorA =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, ThreadMapA, AccessTypeA, GatherA, PermuteALayout>;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, ThreadMapB, AccessTypeB, GatherB, PermuteBLayout>;
// Define the threadblock-scoped multistage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB,
MmaCore::kCacheOpB, ElementAccumulator, LayoutC,
typename MmaCore::MmaPolicy, Stages, SharedMemoryClear>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for column-major-interleaved output
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
    /// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the multistage mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Number of Interleaved K
int InterleavedK>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass,
ArchTag, ThreadblockShape, WarpShape, InstructionShape,
Stages, Operator, true, SharedMemoryClearOption::kNone,
false, false, layout::NoPermute, layout::NoPermute> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, Stages,
Operator, true>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
using IteratorA =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>;
// Define the threadblock-scoped multistage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB,
MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor,
typename MmaCore::MmaPolicy, Stages>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for SIMT IDP4A Kernels
template <
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Operation performed by GEMM
typename Operator,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape>
struct DefaultMma<int8_t, LayoutA, kAlignmentA, int8_t, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
ArchTag, ThreadblockShape, WarpShape, GemmShape<1, 1, 4>, 2,
Operator, false, SharedMemoryClearOption::kNone,
false, false, layout::NoPermute, layout::NoPermute> {
using InstructionShape = GemmShape<1, 1, 4>;
using ElementA = int8_t;
using ElementB = int8_t;
using OperatorClass = arch::OpClassSimt;
static const bool transposeA = platform::is_same< LayoutA, layout::ColumnMajor >::value;
static const bool transposeB = platform::is_same< LayoutB, layout::RowMajor >::value;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor,
OperatorClass, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, transposeA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, transposeB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
layout::RowMajor, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
/// Specialization for Wmma TensorOp operator with 2 staged pipeline
template <
    /// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, false, SharedMemoryClearOption::kNone,
false, false, layout::NoPermute, layout::NoPermute> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
LayoutC, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for Wmma TensorOp operator with 1 staged pipeline
template <
    /// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 1, Operator, false, SharedMemoryClearOption::kNone,
false, false, layout::NoPermute, layout::NoPermute> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp, 1, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>;
// Define the threadblock-scoped singlestage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaSingleStage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
LayoutC, typename MmaCore::MmaPolicy>;
};
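// Illustrative example (hypothetical element types, tile shapes, and alignments,
// not taken from this header): the two WMMA specializations above differ only in
// the Stages argument -- 2 selects the double-buffered MmaPipelined mainloop,
// 1 selects MmaSingleStage. A two-stage instantiation might look roughly like:
//
//   using Mma = cutlass::gemm::threadblock::DefaultMma<
//       cutlass::half_t, cutlass::layout::RowMajor, 8,       // A
//       cutlass::half_t, cutlass::layout::ColumnMajor, 8,    // B
//       float, cutlass::layout::RowMajor,                    // accumulator
//       cutlass::arch::OpClassWmmaTensorOp, cutlass::arch::Sm70,
//       cutlass::gemm::GemmShape<128, 128, 32>,              // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 32>,                // warp tile
//       cutlass::gemm::GemmShape<16, 16, 16>,                // WMMA instruction
//       2, cutlass::arch::OpMultiplyAdd>;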
////////////////////////////////////////////////////////////////////////////////
#endif //CUTLASS_ARCH_WMMA_ENABLED
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/default_mma.h/0 | {"file_path": "include/cutlass/gemm/threadblock/default_mma.h", "repo_id": "include", "token_count": 12168} | 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming
expectations about data layout of the global memory fragments, data types,
and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting TensorOp
instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Template defining default matrix multiply operators inferred from
/// threadblock tile size, global memory data layout, and target math
/// instruction.
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator = arch::OpMultiplyAddComplex,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global>
struct DefaultMultistageMmaComplexCore;
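// Note: this header only declares DefaultMultistageMmaComplexCore. Its partial
// specializations -- which choose shared-memory layouts, tile iterators, and the
// warp-level complex MMA for a given architecture -- are provided by
// architecture-specific headers (e.g., the SM80 variant of this file).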
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/default_multistage_mma_complex_core.h/0 | {"file_path": "include/cutlass/gemm/threadblock/default_multistage_mma_complex_core.h", "repo_id": "include", "token_count": 1405} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/mma_planar_complex_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute a planar complex matrix product across a threadblock
/// using a double-buffered (two-stage) software pipeline.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Data type of accumulator matrix
typename ElementC_,
/// Layout of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Transformation applied to A
ComplexTransform TransformA = ComplexTransform::kNone,
/// Transformation applied to B
ComplexTransform TransformB = ComplexTransform::kNone
>
class MmaPlanarComplexPipelined :
public MmaPlanarComplexBase<Shape_, Policy_, Stages> {
public:
///< Base class
using Base = MmaPlanarComplexBase<Shape_, Policy_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
///< Policy describing tuning details
using Policy = Policy_;
using ArchTag = typename Policy::Operator::ArchTag;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
/// Transformation applied to A
static ComplexTransform const kTransformA = TransformA;
/// Transformation applied to B
static ComplexTransform const kTransformB = TransformB;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC = ArrayPlanarComplex<
typename Policy::Operator::FragmentC::Element,
Policy::Operator::FragmentC::kElements
>;
/// Warp-level Mma
using Operator = typename Policy::Operator;
private:
using FragmentA = typename IteratorA::Fragment;
using FragmentB = typename IteratorB::Fragment;
using WarpFragmentA = typename Operator::FragmentA;
using WarpFragmentB = typename Operator::FragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaPlanarComplexPipelined(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
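// For illustration (assumed values): with WarpCount = {kM = 2, kN = 2, kK = 1},
// warp_idx = 3 yields warp_idx_mn = 3, warp_idx_k = 0, warp_idx_m = 1,
// warp_idx_n = 1.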
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
private:
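// Helper realizing one planar complex multiply-accumulate step with four
// real-valued warp-level MMAs. With A = a_r + i*a_i and B = b_r + i*b_i
// (optionally conjugated per kTransformA / kTransformB):
//   accum.real += a_r * b_r
//   accum.imag += a_r * (+/-)b_i
//   accum.imag += (+/-)a_i * b_r
//   accum.real += (+/-)a_i * b_i   (sign chosen by the conjugation pattern)
// Conjugation is folded in by negating the real or imaginary fragment of B
// before the corresponding MMA.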
CUTLASS_DEVICE
void warp_mma_planar_complex(
Operator & warp_mma,
FragmentC &accum,
WarpFragmentA const & real_A,
WarpFragmentA const & imag_A,
WarpFragmentB const & real_B,
WarpFragmentB const & imag_B) {
cutlass::negate<Array<typename WarpFragmentB::Element, WarpFragmentB::kElements>> neg_op_B;
WarpFragmentB neg_real_B = neg_op_B(real_B);
WarpFragmentB neg_imag_B = neg_op_B(imag_B);
warp_mma(accum.real, real_A, real_B, accum.real);
if (kTransformB == ComplexTransform::kNone) {
warp_mma(accum.imag, real_A, imag_B, accum.imag);
}
else {
warp_mma(accum.imag, real_A, neg_imag_B, accum.imag);
}
if (kTransformA == ComplexTransform::kNone) {
warp_mma(accum.imag, imag_A, real_B, accum.imag);
}
else {
warp_mma(accum.imag, imag_A, neg_real_B, accum.imag);
}
if ((kTransformA == ComplexTransform::kNone) ^ (kTransformB == ComplexTransform::kNone)) {
warp_mma(accum.real, imag_A, imag_B, accum.real);
}
else {
warp_mma(accum.real, imag_A, neg_imag_B, accum.real);
}
}
public:
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A_real,
///< iterator over A operand in global memory
IteratorA iterator_A_imag,
///< iterator over B operand in global memory
IteratorB iterator_B_real,
///< iterator over B operand in global memory
IteratorB iterator_B_imag,
///< initial value of accumulator
FragmentC const &src_accum) {
//
// Prologue
//
// Perform accumulation in the 'd' output operand
accum = src_accum;
FragmentA tb_frag_A_real;
FragmentA tb_frag_A_imag;
FragmentB tb_frag_B_real;
FragmentB tb_frag_B_imag;
tb_frag_A_real.clear();
tb_frag_A_imag.clear();
tb_frag_B_real.clear();
tb_frag_B_imag.clear();
// The first k-block of each operand is loaded in the prologue
iterator_A_real.load(tb_frag_A_real);
iterator_A_imag.load(tb_frag_A_imag);
iterator_B_real.load(tb_frag_B_real);
iterator_B_imag.load(tb_frag_B_imag);
++iterator_A_real;
++iterator_A_imag;
++iterator_B_real;
++iterator_B_imag;
this->smem_iterator_A_.store(tb_frag_A_real);
this->smem_iterator_A_.store_with_pointer_offset(tb_frag_A_imag, Base::SharedStorage::kImaginaryStrideA);
this->smem_iterator_B_.store(tb_frag_B_real);
this->smem_iterator_B_.store_with_pointer_offset(tb_frag_B_imag, Base::SharedStorage::kImaginaryStrideB);
++this->smem_iterator_A_;
++this->smem_iterator_B_;
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math instructions
WarpFragmentA warp_frag_real_A[2];
WarpFragmentA warp_frag_imag_A[2];
WarpFragmentB warp_frag_real_B[2];
WarpFragmentB warp_frag_imag_B[2];
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_frag_real_A[0]);
this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[0], Base::SharedStorage::kImaginaryStrideA);
this->warp_tile_iterator_B_.load(warp_frag_real_B[0]);
this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[0], Base::SharedStorage::kImaginaryStrideB);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
Operator warp_mma;
int smem_write_stage_idx = 1;
// Avoid reading out of bounds
iterator_A_real.clear_mask(gemm_k_iterations <= 1);
iterator_A_imag.clear_mask(gemm_k_iterations <= 1);
iterator_B_real.clear_mask(gemm_k_iterations <= 1);
iterator_B_imag.clear_mask(gemm_k_iterations <= 1);
// Issue loads during the first warp-level matrix multiply-add *AFTER* issuing
// shared memory loads (which have the tightest latency requirement).
//
// Mainloop
//
// Note: The main loop does not support Base::kWarpGemmIterations == 2.
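// Pipeline structure implemented below: shared memory holds two threadblock
// tiles (stages). Each outer iteration consumes one stage through the warp tile
// iterators while the global-memory iterators prefetch the next k-block into
// register fragments (tb_frag_*); those fragments are written to the alternate
// shared-memory stage when the last warp-level k-group is reached. Warp
// fragments are double-buffered in registers (warp_frag_*[2]) so shared-memory
// loads overlap the math instructions.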
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > 0; --gemm_k_iterations) {
//
// Loop over GEMM K dimension
//
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if this is the last group
// as the case may be.
if (warp_mma_k == Base::kWarpGemmIterations - 1) {
// Write fragments to shared memory
this->smem_iterator_A_.store(tb_frag_A_real);
this->smem_iterator_A_.store_with_pointer_offset(tb_frag_A_imag, Base::SharedStorage::kImaginaryStrideA);
this->smem_iterator_B_.store(tb_frag_B_real);
this->smem_iterator_B_.store_with_pointer_offset(tb_frag_B_imag, Base::SharedStorage::kImaginaryStrideB);
__syncthreads();
++this->smem_iterator_B_;
++this->smem_iterator_A_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
}
else {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations,
0});
}
smem_write_stage_idx ^= 1;
}
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_frag_real_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideA);
this->warp_tile_iterator_B_.load(warp_frag_real_B[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideB);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
if (warp_mma_k == 0) {
iterator_A_real.load(tb_frag_A_real);
iterator_A_imag.load(tb_frag_A_imag);
iterator_B_real.load(tb_frag_B_real);
iterator_B_imag.load(tb_frag_B_imag);
++iterator_A_real;
++iterator_A_imag;
++iterator_B_real;
++iterator_B_imag;
// Avoid reading out of bounds if this was the last loop iteration
iterator_A_real.clear_mask(gemm_k_iterations <= 2);
iterator_A_imag.clear_mask(gemm_k_iterations <= 2);
iterator_B_real.clear_mask(gemm_k_iterations <= 2);
iterator_B_imag.clear_mask(gemm_k_iterations <= 2);
}
warp_mma_planar_complex(
warp_mma,
accum,
warp_frag_real_A[warp_mma_k % 2],
warp_frag_imag_A[warp_mma_k % 2],
warp_frag_real_B[warp_mma_k % 2],
warp_frag_imag_B[warp_mma_k % 2]);
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/mma_planar_complex_pipelined.h/0 | {"file_path": "include/cutlass/gemm/threadblock/mma_planar_complex_pipelined.h", "repo_id": "include", "token_count": 5667} | 32 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/functional.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/arch/mma_sm90.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
/// Data type of real & imag members of complex numbers in the SourceFragment
typename RealElement,
/// Destination fragment required by the mma operation
typename DestinationFragment,
/// Source fragment holding complex<RealElement> elements
typename SourceFragment,
/// Number of mma operations performed
typename MmaIterations,
/// Shape of operand elements
typename MmaOperandShape,
/// Complex transform on A operand
ComplexTransform Transform_,
/// Operand A or Operand B
Operand Operand_,
/// Floating-point rounding style
FloatRoundStyle Round_>
struct UnpackComplexConvertAndPackForMma;
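// These helpers unpack a fragment of complex<RealElement>, convert the real and
// imaginary parts to the instruction element type using the given rounding
// style, and repack them planar-complex style: the first group of destination
// fragments holds real parts and the following group holds imaginary parts,
// with conjugation applied by negating the imaginary part.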
// Partial specialization for OperandA and Congruous smem layout
template <
typename RealElement,
typename DestinationFragment,
typename SourceFragment,
typename MmaIterations,
typename MmaOperandShape,
ComplexTransform Transform_,
FloatRoundStyle Round_>
struct UnpackComplexConvertAndPackForMma <
RealElement,
DestinationFragment,
SourceFragment,
MmaIterations,
MmaOperandShape,
Transform_,
Operand::kA,
Round_> {
//
// Type definitions
//
static Operand const kOperand = Operand::kA;
static ComplexTransform const kTransform = Transform_;
static FloatRoundStyle const kRound = Round_;
// Data type of elements in the destination fragment
using MmaElement = typename DestinationFragment::Element;
// Numeric converter MmaElement <= RealElement
using Converter = NumericConverter<MmaElement, RealElement, kRound>;
// Operand layout parameters
using SourceFragmentLayout = layout::ColumnMajor;
static int const kLdm = MmaIterations::kRow * MmaOperandShape::kRow;
/// Ctor
CUTLASS_DEVICE
UnpackComplexConvertAndPackForMma() {}
CUTLASS_DEVICE
void operator()(DestinationFragment *dest, SourceFragment const &source) {
Converter convert_op;
SourceFragmentLayout layout(kLdm);
CUTLASS_PRAGMA_UNROLL
for(int i=0; i<MmaIterations::kRow; i++) {
int pos = 0;
CUTLASS_PRAGMA_UNROLL
for(int c=0; c<MmaOperandShape::kColumn; c++) {
CUTLASS_PRAGMA_UNROLL
for(int r=0; r<MmaOperandShape::kRow; r++) {
// Logical position of element in source fragment
int row = r + i * MmaOperandShape::kRow;
int col = c;
// Access complex<RealElement> and apply rounding on real and imag parts
MmaElement a = convert_op(source[layout(MatrixCoord{row,col})].real());
MmaElement b = convert_op(source[layout(MatrixCoord{row,col})].imag());
// Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation
dest[i][pos] = a;
dest[i+MmaIterations::kRow][pos++] = (kTransform == ComplexTransform::kConjugate ? -b : b);
}
}
}
}
};
// Partial specialization for OperandB and Congruous smem layout
template <
typename RealElement,
typename DestinationFragment,
typename SourceFragment,
typename MmaIterations,
typename MmaOperandShape,
ComplexTransform Transform_,
FloatRoundStyle Round_>
struct UnpackComplexConvertAndPackForMma <
RealElement,
DestinationFragment,
SourceFragment,
MmaIterations,
MmaOperandShape,
Transform_,
Operand::kB,
Round_> {
//
// Type definitions
//
static Operand const kOperand = Operand::kB;
static ComplexTransform const kTransform = Transform_;
static FloatRoundStyle const kRound = Round_;
// Data type of elements in the destination fragment
using MmaElement = typename DestinationFragment::Element;
// Numeric converter MmaElement <= RealElement
using Converter = NumericConverter<MmaElement, RealElement, kRound>;
// Operand layout parameters
using SourceFragmentLayout = layout::RowMajor;
static int const kLdm = MmaIterations::kColumn * MmaOperandShape::kColumn;
/// Ctor
CUTLASS_DEVICE
UnpackComplexConvertAndPackForMma() {}
CUTLASS_HOST_DEVICE
void operator()(DestinationFragment *dest, SourceFragment const &source) {
Converter convert_op;
SourceFragmentLayout layout(kLdm);
CUTLASS_PRAGMA_UNROLL
for(int i=0; i<MmaIterations::kColumn; i++) {
int pos = 0;
CUTLASS_PRAGMA_UNROLL
for(int c=0; c<MmaOperandShape::kColumn; c++) {
CUTLASS_PRAGMA_UNROLL
for(int r=0; r<MmaOperandShape::kRow; r++) {
// Logical position of element in source fragment
int row = r;
int col = c + i * MmaOperandShape::kColumn;
// Access complex<RealElement> and apply rounding on real and imag parts
MmaElement a = convert_op(source[layout(MatrixCoord{row,col})].real());
MmaElement b = convert_op(source[layout(MatrixCoord{row,col})].imag());
// Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation
dest[i][pos] = a;
dest[i+MmaIterations::kColumn][pos++] = (kTransform == ComplexTransform::kConjugate ? -b : b);
}
}
}
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex transform on B operand
ComplexTransform TransformB = ComplexTransform::kNone,
/// Do source operands need more than one elements
bool GeneralizedOperatorElements = false,
/// Used for partial specialization
typename Enable = bool
>
class MmaComplexTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex using real-valued TensorOps
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB
>
class MmaComplexTensorOp<
Shape_,
complex<RealElementA>,
LayoutA_,
complex<RealElementB>,
LayoutB_,
complex<RealElementC>,
LayoutC_,
Policy_,
TransformA,
TransformB> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Architecture tag from underlying instruction
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Indicates math operator
using MathOperator = arch::OpMultiplyAddComplex;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA = FragmentA;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
/// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
/// storage arrangement is to be considered 'planar complex' in the sense that all real-valued
/// parts are stored consecutively followed by all imaginary parts. This matches the structure
/// of Tensor Cores which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
static_assert(
FragmentC::kElements == 2 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements,
"Unexpected planar complex fragment length.");
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaComplexTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C
) const {
// Alias types for underlying real-valued matrix multiply operator
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
static_assert(MmaOperandA::kElements == 1,
"This implementation only supports math instructions in which exactly one element is needed for the A operand."
" We can generalize later.");
static_assert(MmaOperandB::kElements == 1,
"This implementation only supports math instructions in which exactly one element is needed for the B operand."
" We can generalize later.");
D = C;
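// Accumulator layout is planar complex: for the (m, n) warp-level tile, the
// real-part accumulator lives at index (m + n * MmaIterations::kRow) and the
// imaginary-part accumulator at that index plus MmaIterations::kCount. The four
// loops below realize the complex multiply-accumulate with real-valued MMAs.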
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
// mma(accum.real(), a.real(), b.real(), accum.real());
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
operand_A[0] = A[m].real();
operand_B[0] = B[n].real();
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A, operand_B, *accum);
}
// mma(accum.imag(), a.real(), b.imag(), accum.imag());
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
operand_A[0] = A[m].real();
operand_B[0] = (kTransformB == ComplexTransform::kConjugate ? -B[n].imag() : B[n].imag());
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A, operand_B, *accum);
}
// mma(accum.real(), -a.imag(), b.imag(), accum.real())
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
// A imaginary part is intentionally negated
operand_A[0] = (kTransformA == ComplexTransform::kConjugate ? A[m].imag() : -A[m].imag());
operand_B[0] = (kTransformB == ComplexTransform::kConjugate ? -B[n].imag() : B[n].imag());
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A, operand_B, *accum);
}
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
operand_A[0] = (kTransformA == ComplexTransform::kConjugate ? -A[m].imag() : A[m].imag());
operand_B[0] = B[n].real();
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A, operand_B, *accum);
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
dst_A = A;
dst_B = B;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex:
// Operands data type: complex<float>
// Rounding: float -> tfloat32_t (round half_ulp_truncate nearest)
// Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
// Output data type: complex<float>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB
>
class MmaComplexTensorOp<
Shape_,
complex<float>,
LayoutA_,
complex<float>,
LayoutB_,
complex<float>,
LayoutC_,
Policy_,
TransformA,
TransformB> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of members of complex multiplicand A
using RealElementA = float;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of members of complex multiplicand B
using RealElementB = float;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of members of complex accumulator matrix C
using RealElementC = float;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Underlying arch tag
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Indicates math operator
using MathOperator = typename arch::OpMultiplyAddComplex;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA =
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements * 2>;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB =
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements * 2>;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of complex product operations performed (one complex product needs four mma instructions)
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
/// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
/// storage arrangement is to be considered 'planar complex' in the sense that all real-valued
/// parts are stored consecutively followed by all imaginary parts. This matches the structure
/// of Tensor Cores which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaComplexTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
TransformedFragmentA const &A,
TransformedFragmentB const &B,
FragmentC const &C
) const {
// Alias types for underlying real-valued matrix multiply operator
using InstMmaOperandA = typename ArchMmaOperator::FragmentA;
using InstMmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
static_assert(platform::is_same<cutlass::gemm::GemmShape<16, 8, 8>, typename ArchMmaOperator::Shape>::value,
"This implementation only supports mma.m16n8k8 math instructions.");
static_assert(InstMmaOperandA::kElements == 4,
"This implementation only supports math instructions in which exactly four elements are needed for the A operand."
" We can generalize later.");
static_assert(InstMmaOperandB::kElements == 2,
"This implementation only supports math instructions in which exactly two elements are needed for the B operand."
" We can generalize later.");
// Instruction Operands A & B holding real part followed by imaginary part for mma operations
InstMmaOperandA const *operand_A = reinterpret_cast<InstMmaOperandA const *>(&A);
InstMmaOperandB const *operand_B = reinterpret_cast<InstMmaOperandB const *>(&B);
//
// Accumulate in place
//
D = C;
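// Fragment layout produced by transform(): operand_A[0 .. kRow-1] hold real
// parts and operand_A[kRow .. 2*kRow-1] hold (possibly conjugated) imaginary
// parts; operand_B is arranged the same way along kColumn. The four loops below
// compute the complex product with real-valued TF32 MMAs, indexing accumulators
// as in the generic specialization above.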
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
// mma(accum.real(), a.real(), b.real(), accum.real());
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A[m], operand_B[n], *accum);
}
// mma(accum.imag(), a.real(), b.imag(), accum.imag());
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A[m], operand_B[n+MmaIterations::kColumn], *accum);
}
// mma(accum.real(), a.imag(), -b.imag(), accum.real())
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// negate OperandB to accumulate -(a.imag()*b.imag())
// negating OperandB emits fewer instructions than negating OperandA since OperandB has fewer elements
negate<InstMmaOperandB> negate_op;
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A[m+MmaIterations::kRow], negate_op(operand_B[n+MmaIterations::kColumn]), *accum);
}
// mma(accum.imag(), a.imag(), b.real(), accum.imag())
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A[m+MmaIterations::kRow], operand_B[n], *accum);
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
// Alias types for underlying real-valued matrix multiply operator
using InstMmaOperandA = typename ArchMmaOperator::FragmentA;
using InstMmaOperandB = typename ArchMmaOperator::FragmentB;
//
// Define conversions from source type to instruction operands' type
//
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 900
FloatRoundStyle const kRoundA = FloatRoundStyle::round_to_nearest;
FloatRoundStyle const kRoundB = FloatRoundStyle::round_to_nearest;
#else
FloatRoundStyle const kRoundA = FloatRoundStyle::round_half_ulp_trunc_dntz;
FloatRoundStyle const kRoundB = FloatRoundStyle::round_half_ulp_trunc_dntz;
#endif
detail::UnpackComplexConvertAndPackForMma <
RealElementA,
InstMmaOperandA,
FragmentA,
MmaIterations,
MatrixShape<2, 2>,
kTransformA,
Operand::kA,
kRoundA> convert_A;
detail::UnpackComplexConvertAndPackForMma <
RealElementB,
InstMmaOperandB,
FragmentB,
MmaIterations,
MatrixShape<2, 1>,
kTransformB,
Operand::kB,
kRoundB> convert_B;
// Convert Fragment[A|B] holding complex<RealElement[A|B]> to InstMmaOperand[A|B] holding InstMmaOperand[A|B]::Element
convert_A(reinterpret_cast<InstMmaOperandA *>(&dst_A), A);
convert_B(reinterpret_cast<InstMmaOperandB *>(&dst_B), B);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex:
// Operands data type: complex<double>
// Math instruction: mma.sync.aligned.m16n8k4.f64.f64.f64.f64
// Output data type: complex<double>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
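// This specialization passes GeneralizedOperatorElements = true: each f64 MMA
// operand fragment holds more than one element, so the packing loops below
// iterate over MmaOperandA::kElements and MmaOperandB::kElements rather than
// assuming single-element operands as the generic specialization does.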
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB
>
class MmaComplexTensorOp<
Shape_,
complex<double>,
LayoutA_,
complex<double>,
LayoutB_,
complex<double>,
LayoutC_,
Policy_,
TransformA,
TransformB,
true> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of members of complex multiplicand A
using RealElementA = double;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of members of complex multiplicand B
using RealElementB = double;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of members of complex accumulator matrix C
using RealElementC = double;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Underlying arch tag
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Indicates math operator
using MathOperator = typename arch::OpMultiplyAddComplex;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA = FragmentA;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
/// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
/// storage arrangement is to be considered 'planar complex' in the sense that all real-valued
/// parts are stored consecutively followed by all imaginary parts. This matches the structure
/// of Tensor Cores which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
static_assert(
FragmentC::kElements == 2 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements,
"Unexpected planar complex fragment length.");
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaComplexTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C
) const {
// Alias types for underlying real-valued matrix multiply operator
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
D = C;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
// mma(accum.real(), a.real(), b.real(), accum.real());
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_A[mk] = A[m*MmaOperandA::kElements + mk].real();
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_B[nk] = B[n*MmaOperandB::kElements + nk].real();
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A, operand_B, *accum);
}
// mma(accum.imag(), a.real(), b.imag(), accum.imag());
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_A[mk] = A[m*MmaOperandA::kElements + mk].real();
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_B[nk] = (kTransformB == ComplexTransform::kConjugate ?
-B[n*MmaOperandB::kElements + nk].imag() : B[n*MmaOperandB::kElements + nk].imag());
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A, operand_B, *accum);
}
// mma(accum.real(), -a.imag(), b.imag(), accum.real())
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
// A imaginary part is intentionally negated
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_A[mk] = (kTransformA == ComplexTransform::kConjugate ?
A[m*MmaOperandA::kElements + mk].imag() : -A[m*MmaOperandA::kElements + mk].imag());
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_B[nk] = (kTransformB == ComplexTransform::kConjugate ?
-B[n*MmaOperandB::kElements + nk].imag() : B[n*MmaOperandB::kElements + nk].imag());
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A, operand_B, *accum);
}
// mma(accum.imag(), a.imag(), b.real(), accum.imag())
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_A[mk] = (kTransformA == ComplexTransform::kConjugate ?
-A[m*MmaOperandA::kElements + mk].imag() : A[m*MmaOperandA::kElements + mk].imag());
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_B[nk] = B[n*MmaOperandB::kElements + nk].real();
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A, operand_B, *accum);
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
dst_A = A;
dst_B = B;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_complex_tensor_op.h/0 | {"file_path": "include/cutlass/gemm/warp/mma_complex_tensor_op.h", "repo_id": "include", "token_count": 13631} | 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/// Tile access iterator
/// Each access in the tile is used as a multiplicand
/// for one warp-level matrix multiplication
template <
/// Size of the tile (concept: MatrixShape)
typename Shape_,
/// Operand identity
Operand Operand_,
/// Data type of A elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads = 32,
/// Enable Residual Support
bool EnableResidual = false,
/// Number of partitions along K dimension
int PartitionsK_ = 1
>
class MmaTensorOpMultiplicandTileAccessIterator {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
/// Basic check
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Number of elements accessed per Shared Memory load
static int const kElementsPerAccess =
(sizeof_bits<Element>::value >= 32 ? 1 : 32 / sizeof_bits<Element>::value);
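// Illustrative note: for 16-bit elements (e.g. half_t) this evaluates to 32 / 16 = 2
// elements per access, for 8-bit elements to 4, and for 32-bit or wider elements to 1.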
using InstructionCount = MatrixShape<
Shape::kRow / InstructionShape::kRow,
Shape::kColumn / InstructionShape::kColumn
>;
static int const kIterations = (kOperand == Operand::kA) ?
InstructionCount::kColumn : InstructionCount::kRow;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<
Element,
(kOperand == Operand::kA) ?
(Shape::kRow * InstructionShape::kColumn / kThreads) :
(Shape::kColumn * InstructionShape::kRow / kThreads)
>;
/// Memory access type
using AccessType = AlignedArray<Element, kElementsPerAccess>;
private:
/// Underlying tensor reference
TensorRef ref_;
/// Extent of tensor
MatrixCoord extent_;
/// Origin
MatrixCoord origin_;
/// Used to load residual tile
bool is_residual_;
/// residual offset of each thread
TensorCoord residual_offset_;
/// Iterations in a tile
int iterations_;
public:
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileAccessIterator(
TensorRef const &ref,
TensorCoord extent,
int lane_id
): ref_(ref), extent_(extent), is_residual_(false), iterations_(0) {
if (kOperand == Operand::kA) {
origin_ = MatrixCoord(lane_id / 4, (lane_id % 4) * kElementsPerAccess);
}
else {
origin_ = MatrixCoord((lane_id % 4) * kElementsPerAccess, lane_id / 4);
}
ref_.add_coord_offset(origin_);
if(EnableResidual) {
// compute residual offset
if (kOperand == Operand::kA) {
typename TensorCoord::Index residual_size =
extent_.column() % Shape::kColumn;
if(residual_size) {
is_residual_ = true;
residual_offset_ = make_Coord(0, residual_size);
}
}
else {
typename TensorCoord::Index residual_size =
extent_.row() % Shape::kRow;
if(residual_size) {
is_residual_ = true;
residual_offset_ = make_Coord(residual_size, 0);
}
}
}
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileAccessIterator(
TensorRef const &ref,
int lane_id
): MmaTensorOpMultiplicandTileAccessIterator(ref,
{Shape::kRow, Shape::kColumn}, lane_id) {
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileAccessIterator &add_tile_offset(TensorCoord const &tile_offset) {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
origin_ += coord_offset;
ref_.add_coord_offset(coord_offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
void advance() {
if(EnableResidual && is_residual_) {
is_residual_ = false;
origin_ += residual_offset_;
ref_.add_coord_offset(residual_offset_);
}
else {
if (kOperand == Operand::kA) {
add_tile_offset({0, 1});
}
else {
add_tile_offset({1, 0});
}
}
iterations_ = 0;
}
/// Increments the iteration index within a tile, advancing to the next tile after kIterations accesses
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileAccessIterator & operator++() {
iterations_++;
if(iterations_ >= kIterations)
advance();
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
int const kWarpShapeDivisibleInner =
(kOperand == Operand::kA ? InstructionShape::kColumn : InstructionShape::kRow);
// Take advantage of Tensor Op's 8 x 4T access pattern
int const kAccessesInner = (kWarpShapeDivisibleInner / kElementsPerAccess) / 4;
AccessType *access_ptr = reinterpret_cast<AccessType *>(&frag);
if (kOperand == Operand::kA) {
int const kTilesPerInstruction = InstructionShape::kRow / 8;
CUTLASS_PRAGMA_UNROLL
for (int inst_m_idx = 0; inst_m_idx < InstructionCount::kRow; ++inst_m_idx) {
CUTLASS_PRAGMA_UNROLL
for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
CUTLASS_PRAGMA_UNROLL
for (int access_m_idx = 0; access_m_idx < kTilesPerInstruction; ++access_m_idx) {
int access_idx =
access_m_idx + kTilesPerInstruction * (inner_idx + kAccessesInner * inst_m_idx);
MatrixCoord offset(
access_m_idx * 8 + inst_m_idx * InstructionShape::kRow,
inner_idx * 4 * kElementsPerAccess + iterations_ * InstructionShape::kColumn);
MatrixCoord access_coord = origin_ + offset;
// if(access_coord.row() < extent_.row() && access_coord.column() < extent_.column()) {
access_ptr[access_idx] = *reinterpret_cast<AccessType const *>(
ref_.data() + ref_.offset(offset));
// }
// else {
// AccessType zero;
// zero.clear();
// access_ptr[access_idx] = zero;
// }
}
}
}
}
else {
CUTLASS_PRAGMA_UNROLL
for (int inst_n_idx = 0; inst_n_idx < InstructionCount::kColumn; ++inst_n_idx) {
CUTLASS_PRAGMA_UNROLL
for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
int access_idx = inner_idx + kAccessesInner * inst_n_idx;
MatrixCoord offset(
inner_idx * 4 * kElementsPerAccess + iterations_ * InstructionShape::kRow,
inst_n_idx * 8);
MatrixCoord access_coord = origin_ + offset;
// if(access_coord.row() < extent_.row() && access_coord.column() < extent_.column()) {
access_ptr[access_idx] = *reinterpret_cast<AccessType const *>(
ref_.data() + ref_.offset(offset));
// }
// else {
// AccessType zero;
// zero.clear();
// access_ptr[access_idx] = zero;
// }
}
}
}
}
};
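////////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative only; not part of the interface). A warp-level mainloop
// might construct the iterator over an operand tile and load one fragment per MMA
// iteration. The names ref_A, extent_A, lane_id and kGemmKIterations below are
// hypothetical placeholders.
//
//   using IteratorA = MmaTensorOpMultiplicandTileAccessIterator<
//       cutlass::MatrixShape<64, 16>, Operand::kA, cutlass::half_t,
//       cutlass::layout::RowMajor, cutlass::MatrixShape<16, 8>, 1>;
//
//   IteratorA iter_A(ref_A, extent_A, lane_id);
//   typename IteratorA::Fragment frag_A;
//
//   for (int k = 0; k < kGemmKIterations; ++k) {
//     iter_A.load(frag_A);
//     ++iter_A;   // advances to the next tile once kIterations accesses are consumed
//     // ... issue warp-level MMA with frag_A ...
//   }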
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h",
"repo_id": "include",
"token_count": 4051
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic thread level reduction with specializations for Array<T, N>.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/half.h"
#include "cutlass/functional.h"
namespace cutlass {
namespace reduction {
namespace thread {
/// Structure to compute the thread level reduction
template <typename Op, typename T>
struct Reduce;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial Specialization of Reduce for "plus" (a functional operator)
template <typename T>
struct Reduce< plus<T>, T > {
CUTLASS_HOST_DEVICE
T operator()(T lhs, T const &rhs) const {
plus<T> _op;
return _op(lhs, rhs);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization of Reduce for Array<T, N>
template <typename T, int N>
struct Reduce < plus<T>, Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, 1> operator()(Array<T, N> const &in) const {
Array<T, 1> result;
Reduce< plus<T>, T > scalar_reduce;
result.clear();
CUTLASS_PRAGMA_UNROLL
for (auto i = 0; i < N; ++i) {
result[0] = scalar_reduce(result[0], in[i]);
}
return result;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specializations of Reduce for Array<half_t, N>
template <int N>
struct Reduce < plus<half_t>, Array<half_t, N> > {
CUTLASS_HOST_DEVICE
Array<half_t, 1> operator()(Array<half_t, N> const &input) {
Array<half_t, 1> result;
// If there is only 1 element - there is nothing to reduce
if (N == 1) {
result[0] = input.front();
} else {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
__half result_d;
Array<half_t, 1> const *in_ptr_half = reinterpret_cast<Array<half_t, 1> const *>(&input);
Array<half_t, 2> const *in_ptr_half2 = reinterpret_cast<Array<half_t, 2> const *>(&input);
__half2 const *x_in_half2 = reinterpret_cast<__half2 const *>(in_ptr_half2);
// Set initial result = first half2, in case N==2
__half2 tmp_result = x_in_half2[0];
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < N/2; ++i) {
tmp_result = __hadd2(x_in_half2[i], tmp_result);
}
result_d = __hadd(__low2half(tmp_result), __high2half(tmp_result));
// One final step is needed for odd "N" (to add the (N-1)th element)
if (N % 2) {
__half last_element;
Array<half_t, 1> tmp_last;
Array<half_t, 1> *tmp_last_ptr = &tmp_last;
tmp_last_ptr[0] = in_ptr_half[N-1];
last_element = reinterpret_cast<__half const &>(tmp_last);
result_d = __hadd(result_d, last_element);
}
Array<half_t, 1> *result_ptr = &result;
*result_ptr = reinterpret_cast<Array<half_t, 1> &>(result_d);
#else
Reduce< plus<half_t>, half_t > scalar_reduce;
result.clear();
CUTLASS_PRAGMA_UNROLL
for (auto i = 0; i < N; ++i) {
result[0] = scalar_reduce(result[0], input[i]);
}
#endif
}
return result;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specializations of Reduce for AlignedArray<half_t, N>
template <int N>
struct Reduce < plus<half_t>, AlignedArray<half_t, N> > {
CUTLASS_HOST_DEVICE
Array<half_t, 1> operator()(AlignedArray<half_t, N> const &input) {
Array<half_t, 1> result;
// If there is only 1 element - there is nothing to reduce
if (N == 1) {
result[0] = input.front();
} else {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
__half result_d;
AlignedArray<half_t, 1> const *in_ptr_half = reinterpret_cast<AlignedArray<half_t, 1> const *>(&input);
AlignedArray<half_t, 2> const *in_ptr_half2 = reinterpret_cast<AlignedArray<half_t, 2> const *>(&input);
__half2 const *x_in_half2 = reinterpret_cast<__half2 const *>(in_ptr_half2);
// Set initial result = first half2, in case N==2
__half2 tmp_result = x_in_half2[0];
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < N/2; ++i) {
tmp_result = __hadd2(x_in_half2[i], tmp_result);
}
result_d = __hadd(__low2half(tmp_result), __high2half(tmp_result));
// One final step is needed for odd "N" (to add the (N-1)th element)
if (N % 2) {
__half last_element;
AlignedArray<half_t, 1> tmp_last;
AlignedArray<half_t, 1> *tmp_last_ptr = &tmp_last;
tmp_last_ptr[0] = in_ptr_half[N-1];
last_element = reinterpret_cast<__half const &>(tmp_last);
result_d = __hadd(result_d, last_element);
}
Array<half_t, 1> *result_ptr = &result;
*result_ptr = reinterpret_cast<Array<half_t, 1> &>(result_d);
#else
Reduce< plus<half_t>, half_t > scalar_reduce;
result.clear();
CUTLASS_PRAGMA_UNROLL
for (auto i = 0; i < N; ++i) {
result[0] = scalar_reduce(result[0], input[i]);
}
#endif
}
return result;
}
};
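/////////////////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative): reducing a register-resident array to a single element.
// The element type and count below are arbitrary.
//
//   cutlass::Array<float, 8> items;
//   // ... fill items ...
//   cutlass::reduction::thread::Reduce<cutlass::plus<float>, cutlass::Array<float, 8>> reduce_op;
//   cutlass::Array<float, 1> sum = reduce_op(items);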
} // namespace thread
} // namespace reduction
} // namespace cutlass
| include/cutlass/reduction/thread/reduce.h/0 | {
"file_path": "include/cutlass/reduction/thread/reduce.h",
"repo_id": "include",
"token_count": 2815
} | 35 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Convolution filter format transformation kernel.
*/
#pragma once
#include <algorithm>
#include <random>
#include "cutlass/coord.h"
#include "cutlass/arch/arch.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/cuda_host_adapter.hpp"
#include "cute/int_tuple.hpp"
#include "cute/tensor.hpp"
#include "cute/config.hpp"
namespace cutlass::transform::kernel {
using namespace cute;
enum class FilterFormat {
CKTRS,
CTRSK,
KTRSC
};
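// Illustrative note: reading each tag from outermost to innermost dimension (as implied by
// the shape construction in to_underlying_arguments below), CKTRS keeps S contiguous,
// CTRSK keeps K contiguous, and KTRSC keeps C contiguous.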
template <
FilterFormat SrcFormat,
FilterFormat DstFormat,
int NumDimensions,
class Element,
int AlignmentBytes = 16
>
struct ConvFilterFormatTransformer {
static_assert(SrcFormat == FilterFormat::CKTRS, "Currently only source format of CKTRS is supported");
static_assert(DstFormat == FilterFormat::CTRSK || DstFormat == FilterFormat::KTRSC, "Currently only destination format of CTRSK/KTRSC is supported");
static_assert(AlignmentBytes % static_cast<int>(sizeof(Element)) == 0, "Invalid alignment setting");
// In ktrsc order.
using FilterExtent = array<int, NumDimensions>;
// Default cta tile shape: 32x32
static constexpr auto CTATileShape = make_shape(Int<4 * AlignmentBytes / static_cast<int>(sizeof(Element))>{}, Int<32>{});
// Default thread layout: (4, 32)
static constexpr auto ThreadLayout = make_layout(make_shape(Int<4>{}, Int<32>{}));
static constexpr uint32_t MaxThreadsPerBlock = 128;
static constexpr uint32_t MinBlocksPerMultiprocessor = 1;
using ArchTag = arch::Sm90;
// Default ctor
CUTLASS_HOST_DEVICE
ConvFilterFormatTransformer() {}
struct Arguments {
const void *src_ptr;
void *dst_ptr;
FilterExtent filter_extent;
};
struct Params {
using TensorSrc = decltype(make_tensor(make_gmem_ptr(recast_ptr<const Element>(nullptr)), make_layout(take<0,NumDimensions>(FilterExtent{}))));
using TensorDst = decltype(make_tensor(make_gmem_ptr(recast_ptr<Element>(nullptr)), make_layout(make_shape(int32_t(0), int32_t(0)))));
TensorSrc src;
TensorDst dst;
};
struct SharedStorage {
/* empty, no smem needed */
};
static constexpr int SharedStorageSize = sizeof(SharedStorage);
static Status
can_implement(Arguments const& args) {
return Status::kSuccess;
}
static size_t
get_workspace_size(Arguments const& args) {
return 0;
}
static dim3
get_block_shape() {
return dim3(size(shape(ThreadLayout)), 1, 1);
}
static dim3
get_grid_shape(Params const& params) {
auto dim_m = ceil_div(size<0>(shape(params.dst)), get<0>(CTATileShape));
auto dim_n = ceil_div(size<1>(shape(params.dst)), get<1>(CTATileShape));
return dim3(dim_m, dim_n, 1);
}
static cutlass::Status
initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr) {
return Status::kSuccess;
}
static Params
to_underlying_arguments(Arguments & args, void* workspace) {
auto k = args.filter_extent[0];
auto c = args.filter_extent[NumDimensions - 1];
auto srt = reverse(take<1,NumDimensions - 1>(args.filter_extent));
// source shape (s,r,t,k,c)
auto shape_src = flatten(make_shape(srt, k, c));
auto shape_dst = DstFormat == FilterFormat::CTRSK ? make_shape(k, c * product(srt)) : make_shape(c, k * product(srt));
auto src = make_tensor(make_gmem_ptr(recast_ptr<const Element>(args.src_ptr)), make_layout(shape_src));
auto dst = make_tensor(make_gmem_ptr(recast_ptr<Element>(args.dst_ptr)), make_layout(shape_dst));
return Params{src, dst};
}
CUTLASS_DEVICE
void operator()(Params const& params, char *smem_buf) {
// Tile the input tensor into blocks
auto block_coord = make_coord(blockIdx.x, blockIdx.y);
auto block_shape = make_shape(Int<4 * AlignmentBytes / static_cast<int>(sizeof(Element))>{}, Int<32>{});
// Default thread layout: (4, 32)
auto thread_layout = make_layout(make_shape(Int<4>{}, Int<32>{}));
auto vec_layout = make_layout(make_shape(Int<AlignmentBytes / static_cast<int>(sizeof(Element))>{}, Int<1>{}));
Tensor tile_D = local_tile(params.dst, block_shape, block_coord);
// Construct tiled copy
using AccessType = cutlass::AlignedArray<Element, size(vec_layout)>;
using Atom = Copy_Atom<UniversalCopy<AccessType>, Element>;
auto tiled_copy = make_tiled_copy(Atom{}, thread_layout, vec_layout);
auto thr_copy = tiled_copy.get_thread_slice(threadIdx.x);
Tensor thr_tile_D = thr_copy.partition_D(tile_D);
// shape (s, r, t)
auto shape_trs = take<0, NumDimensions - 2>(shape(params.src));
// strided_c = c for format CTRSK, strided_c = k for format KTRSC
auto strided_c = DstFormat == FilterFormat::CTRSK ? get<NumDimensions - 1>(shape(params.src)) : get<NumDimensions - 2>(shape(params.src));
// shape (s, r, t, c) for format CTRSK and shape (s, r, t, k) for format KTRSC
auto shape_ctrs = append<NumDimensions - 1>(shape_trs, strided_c);
auto srtc_coord = idx2crd(int(blockIdx.y * get<1>(block_shape) + threadIdx.x / size<0>(thread_layout)), shape_ctrs);
// index of k for format CTRSK and index of c for format KTRSC
auto n_layout = make_layout(make_shape(gridDim.x, size<0>(thread_layout)), make_stride(size<0>(block_shape), size<0>(vec_layout)));
int n_idx = n_layout(make_coord(blockIdx.x, threadIdx.x % size<0>(thread_layout)));
// Fragment to load from S and store to D
auto frag = make_fragment_like(thr_tile_D);
// Predicate tensor.
Tensor thr_tile_P = make_tensor<bool>(shape(thr_tile_D));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size(frag); ++i) {
auto srt_coord = take<0, NumDimensions - 2>(srtc_coord);
auto kc_coord = DstFormat == FilterFormat::CTRSK ?
make_coord(n_idx+i, get<NumDimensions - 2>(srtc_coord)) :
make_coord(get<NumDimensions - 2>(srtc_coord), n_idx+i);
auto coord = flatten(make_coord(srt_coord, kc_coord));
frag(i) = params.src(coord);
thr_tile_P(i) = elem_less(coord, shape(params.src));
}
// Copy from RMEM to GMEM
copy_if(tiled_copy, thr_tile_P, frag, thr_tile_D);
}
};
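// Host-side usage sketch (illustrative; the launch helper `launch_transform`, the pointers
// and the filter extent below are hypothetical placeholders, not part of this header):
//
//   using Transformer = ConvFilterFormatTransformer<
//       FilterFormat::CKTRS, FilterFormat::CTRSK, /*NumDimensions=*/5, cutlass::half_t>;
//
//   Transformer::Arguments args{src_ptr, dst_ptr, /*(k,t,r,s,c)=*/{64, 3, 3, 3, 32}};
//   auto params = Transformer::to_underlying_arguments(args, /*workspace=*/nullptr);
//   dim3 grid = Transformer::get_grid_shape(params);
//   dim3 block = Transformer::get_block_shape();
//   launch_transform<Transformer>(grid, block, params);  // hypothetical kernel launcher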
} // namespace cutlass::transform::kernel
| include/cutlass/transform/kernel/filter_format_transformer.hpp/0 | {
"file_path": "include/cutlass/transform/kernel/filter_format_transformer.hpp",
"repo_id": "include",
"token_count": 2826
} | 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates for computing the addresses used to load small
vectors from global memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedVectorAccessIterator
///
template <
/// Shape of the vector accessed by the entire threadblock
typename Shape,
/// Shape of the vector accessed by the warp
typename WarpShape,
/// Type of Element
typename Element,
/// Layout of the vector
typename Layout,
/// Number of elements for each access
int ElementsPerAccess,
/// Support residual tile
bool EnableResidualAccess = false
>
class PredicatedVectorAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Vector access iterator specialized for vectors, e.g. scale and bias
/// Thread arrangements are for TensorOps
///
template <
typename Shape_,
typename WarpShape_,
typename Element_,
int ElementsPerAccess,
bool EnableResidualAccess
>
class PredicatedVectorAccessIterator <
Shape_,
WarpShape_,
Element_,
layout::PitchLinear,
ElementsPerAccess,
EnableResidualAccess
> {
public:
using Shape = Shape_;
using WarpShape = WarpShape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
// static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kThreads = 32;
static int const kRowsPerIteration = 8;
static int const kThreadsPerRow = kThreads / kRowsPerIteration;
static int const kThreadsPerRowMask = 0x3;
static int const kIterations = WarpShape::kContiguous / (kThreadsPerRow * kElementsPerAccess);
static int const kWarpCountStrided = Shape::kStrided / WarpShape::kStrided;
using AccessType = AlignedArray<Element, kElementsPerAccess>;
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Internal pointer to first access of tile
BytePointer pointer_;
/// Extent of tensor
TensorCoord extent_;
/// pointer offset of each thread
TensorCoord thread_offset_;
/// iteration index
LongIndex iteration_;
/// residual access
bool is_residual_;
/// residual offset of each thread
TensorCoord residual_offset_;
public:
/// Constructs a vector access iterator
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator(
/// Pointer to the start of the vector
ConstPointer pointer,
/// Extent of vector
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// ID of each participating warp
int warp_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: pointer_(reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(pointer))),
extent_(extent),
is_residual_(false) {
int warp_offset = (warp_id / kWarpCountStrided) * WarpShape::kContiguous;
// Per-thread offset in logical coordinates of tensor
thread_offset_ = threadblock_offset + TensorCoord(warp_offset, 0) +
TensorCoord((thread_id & kThreadsPerRowMask) * kElementsPerAccess, 0);
set_iteration_index(0);
if(EnableResidualAccess) {
// compute residual offset
typename TensorCoord::Index residual_size = extent_.contiguous() % WarpShape::kContiguous;
if (residual_size) {
is_residual_ = true;
residual_offset_ = make_Coord(residual_size, 0);
}
}
}
/// Construct a PredicatedVectorAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator(
/// Pointer to start of vector
ConstPointer pointer,
/// Extent of vector
TensorCoord extent,
///< ID of each participating thread
int thread_id,
/// ID of each participating warp
int warp_id)
: PredicatedVectorAccessIterator(pointer, extent, thread_id, warp_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_ = index;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
thread_offset_ =
thread_offset_ +
TensorCoord(WarpShape::kContiguous * tile_offset.contiguous(), 0);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(
pointer_ +
((thread_offset_.contiguous() + iteration_ * kThreadsPerRow * kElementsPerAccess)
* sizeof_bits<Element>::value / 8));
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator &operator++() {
++iteration_;
if(iteration_ >= kIterations)
iteration_ = 0;
return *this;
}
/// Advances to the next tile, consuming the residual tile first when enabled.
CUTLASS_HOST_DEVICE
void advance() {
if(EnableResidualAccess && is_residual_) {
is_residual_ = false;
thread_offset_ += residual_offset_;
}
else
add_tile_offset(TensorCoord(1, 0));
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator operator++(int) {
PredicatedVectorAccessIterator self(*this);
operator++();
return self;
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return ((thread_offset_.contiguous() +
iteration_ * kThreadsPerRow * kElementsPerAccess) < extent_.contiguous());
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedVectorAccessIterator for row-major data.
///
template <
typename Shape_,
typename WarpShape_,
typename Element_,
int ElementsPerAccess,
bool EnableResidualAccess
>
class PredicatedVectorAccessIterator<
Shape_,
WarpShape_,
Element_,
layout::RowMajor,
ElementsPerAccess,
EnableResidualAccess
> {
public:
using Shape = Shape_;
using WarpShape = WarpShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedVectorAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
layout::PitchLinearShape<WarpShape::kColumn, WarpShape::kRow>,
Element,
layout::PitchLinear,
ElementsPerAccess,
EnableResidualAccess>;
using AccessType = typename UnderlyingIterator::AccessType;
static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess;
static int const kRowsPerIteration = UnderlyingIterator::kRowsPerIteration;
static int const kThreads = UnderlyingIterator::kThreads;
static int const kIterations = UnderlyingIterator::kIterations;
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator(
///< Pointer to the start of the vector
ConstPointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< ID of each participating warp
int warp_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(pointer, layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id, warp_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedVectorAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator(
ConstPointer pointer, ///< Pointer to the start of the vector
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
int warp_id ///< ID of each participating warp
)
: PredicatedVectorAccessIterator(pointer, extent, thread_id, warp_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator operator++(int) {
PredicatedVectorAccessIterator self(*this);
operator++();
return self;
}
/// Advances to the next tile by delegating to the underlying iterator.
CUTLASS_HOST_DEVICE
void advance() {
iterator_.advance();
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
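////////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative): iterating over a row-major scale/bias vector. The names
// scale_ptr, extent, thread_id and warp_id below are hypothetical placeholders.
//
//   using VectorIterator = PredicatedVectorAccessIterator<
//       cutlass::MatrixShape<1, 128>, cutlass::MatrixShape<1, 64>,
//       cutlass::half_t, cutlass::layout::RowMajor, /*ElementsPerAccess=*/8>;
//
//   VectorIterator iter(scale_ptr, extent, thread_id, warp_id);
//   for (int i = 0; i < VectorIterator::kIterations; ++i, ++iter) {
//     if (iter.valid()) {
//       VectorIterator::AccessType const *access = iter.get();
//       // ... consume *access ...
//     }
//   }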
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
| include/cutlass/transform/threadblock/predicated_vector_access_iterator.h/0 | {
"file_path": "include/cutlass/transform/threadblock/predicated_vector_access_iterator.h",
"repo_id": "include",
"token_count": 4187
} | 37 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Statically sized arrays of nvcuda::wmma fragments (WmmaFragmentArray) with
clear and accumulate operations.
*/
#pragma once
#include "cutlass/arch/wmma.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Wmma array type (WmmaFragmentArray holds elements of type nvcuda::wmma::fragment)
template <
/// Element type
typename T,
/// Number of elements in the array
int N,
/// Whether the element type of T is half_t or __half
bool IsHalfType = (platform::is_same<typename T::element_type, cutlass::half_t>::value ||
platform::is_same<typename T::element_type, __half>::value)
>
class WmmaFragmentArray: public Array<T, N, true> {
public:
/// Efficient clear method (override Array::clear())
CUTLASS_HOST_DEVICE
void clear()
{
for(int i = 0; i < Array<T, N, true>::kElements; i++)
{
nvcuda::wmma::fill_fragment((*this)[i], (typename T::element_type)0);
}
}
CUTLASS_HOST_DEVICE
WmmaFragmentArray<T, N>& operator+=(const WmmaFragmentArray<T, N>& rhs)
{
using element_type = typename T::element_type;
plus<T> add;
for (int i = 0; i < Array<T, N, true>::kElements; i++)
{
(*this)[i] = add((*this)[i], rhs[i]);
}
return *this;
}
};
/// Partial specialization for the case in which T::element_type is
/// half_t or __half. This is needed because the cast (typename T::element_type)0
/// in the primary template flags as an error when __CUDA_NO_HALF_CONVERSIONS__
/// is set.
template <
/// Element type
typename T,
/// Number of elements in the array
int N
>
class WmmaFragmentArray<T, N, true>: public Array<T, N, true> {
public:
/// Efficient clear method (override Array::clear())
CUTLASS_HOST_DEVICE
void clear()
{
for(int i = 0; i < Array<T, N, true>::kElements; i++)
{
nvcuda::wmma::fill_fragment((*this)[i], __float2half(0.f));
}
}
CUTLASS_HOST_DEVICE
WmmaFragmentArray<T, N>& operator+=(const WmmaFragmentArray<T, N>& rhs)
{
using element_type = typename T::element_type;
plus<T> add;
for (int i = 0; i < Array<T, N, true>::kElements; i++)
{
(*this)[i] = add((*this)[i], rhs[i]);
}
return *this;
}
};
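////////////////////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative): accumulating WMMA accumulator fragments across mainloop
// iterations. FragmentC and kFragments below stand for a fragment type and count chosen elsewhere.
//
//   WmmaFragmentArray<FragmentC, kFragments> accum;
//   accum.clear();
//   // ... per iteration:
//   accum += partial_accum;  // element-wise fragment addition as defined above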
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
#endif // if defined(CUTLASS_ARCH_WMMA_ENABLED)
| include/cutlass/wmma_array.h/0 | {
"file_path": "include/cutlass/wmma_array.h",
"repo_id": "include",
"token_count": 1451
} | 38 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import subprocess
from cutlass_library import DataTypeTag
import pydot
from cutlass.backend.evt.ir.dag_ir import DAGIR
_COLOR_MAP = {
"load": '"AliceBlue"',
"compute": "LemonChiffon1",
"accumulator": "LightGrey",
"store": "PowderBlue",
"layout": "lightseagreen",
"dag": "darkorange"
}
class EVTGraphDrawer:
"""
Visualize a EVT DAGIR with graphviz
"""
def __init__(
self,
graph: DAGIR,
name: str
):
self._name = name
self._dot_graphs = {}
self._dot_graphs[name] = self._to_dot(graph, name)
def _get_node_style(self, node):
template = {
"shape": "record",
"fillcolor": "#CAFFE3",
"style": '"filled,rounded"',
"fontcolor": "#000000",
}
if node.op in _COLOR_MAP:
template["fillcolor"] = _COLOR_MAP[node.op]
else:
raise NotImplementedError("unknown node op")
if node.disabled:
template["fontcolor"] = "grey"
template["fillcolor"] = "white"
return template
def _get_node_label(self, node):
label = "{" + f"name={node.name}|op={node.op}"
if node.op == "layout":
label += f"|fn={node.fn.__name__}"
for key in node.kwargs:
label += f"|{key}={node.kwargs[key]}"
if node.underlying_impl is not None:
label += f"|impl={type(node.underlying_impl).__name__}"
if node.op == "load":
label += f"|element_output={DataTypeTag[node.underlying_impl.element]}"
elif node.op == "compute":
label += f"|element_compute={DataTypeTag[node.underlying_impl.element_compute]}|element_output={DataTypeTag[node.underlying_impl.element_output]}"
elif node.op == "store":
label += f"|element_store={DataTypeTag[node.underlying_impl.element]}|element_output={DataTypeTag[node.underlying_impl.element_output]}"
elif node.op == "dag":
label += f"|element_output={DataTypeTag[node.underlying_impl.element_output]}"
if node.tensor is not None:
shape = node.tensor.shape
stride = node.tensor.stride
label += f"|shape={shape}|stride={stride}"
if hasattr(node, "store_tensor"):
if node.store_tensor is not None:
store_shape = node.store_tensor.shape
store_stride = node.store_tensor.stride
label += f"|store_shape={store_shape}|stride_stride={store_stride}"
label += "}"
return label
def _to_dot(
self,
graph: DAGIR,
name: str
):
dot_graph = pydot.Dot(name, rankdir="TB")
for node in graph.nodes_meta:
style = self._get_node_style(node)
label = self._get_node_label(node)
dot_node = pydot.Node(
node.name, label=label, **style
)
dot_graph.add_node(dot_node)
if node.op == "dag":
dot_subgraph = self._to_dot(node.subgraph, name=node.name)
self._dot_graphs[node.name] = dot_subgraph
# Add edges
for src, dst in graph.edges:
weight = graph.get_edge_weight(src, dst)
dot_graph.add_edge(pydot.Edge(src, dst, label=weight))
return dot_graph
def get_dot_graph(self) -> pydot.Dot:
return [(key, self.get_dot_graph_by_name(key)) for key in self._dot_graphs.keys()]
def get_dot_graph_by_name(self, name) -> pydot.Dot:
return self._dot_graphs[name]
def get_main_dot_graph(self) -> pydot.Dot:
return self._dot_graphs[self._name]
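# Usage sketch (illustrative): `dag_ir` stands for a DAGIR instance produced by the EVT
# frontend, and the output filenames are arbitrary.
#
#   drawer = EVTGraphDrawer(dag_ir, "epilogue")
#   for name, dot_graph in drawer.get_dot_graph():
#       dot_graph.write_png(f"{name}.png")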
| python/cutlass/backend/evt/passes/graph_drawer.py/0 | {
"file_path": "python/cutlass/backend/evt/passes/graph_drawer.py",
"repo_id": "python",
"token_count": 2285
} | 39 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import ctypes
from cuda import __version__, cuda
from cutlass.backend.utils.device import device_cc
_version_splits = [int(x) for x in __version__.split("rc")[0].split(".")]
_supports_cluster_launch = None
def supports_cluster_launch():
global _supports_cluster_launch
if _supports_cluster_launch is None:
major, minor = _version_splits[0], _version_splits[1]
_supports_cluster_launch = device_cc() >= 90 and (major > 11 or (major == 11 and minor >= 8))
return _supports_cluster_launch
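# Illustrative example: with cuda-python 11.8 (or newer) on an SM90 device the check above
# returns True; on an SM80 device, or with an older cuda-python release, it returns False.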
class LaunchConfiguration:
def __init__(self, grid=[1, 1, 1], block=[1, 1, 1], smem=0):
self.grid = grid
self.block = block
self.shared_memory_capacity = smem
class ExecutableOperation:
def __init__(self, operation):
self.operation = operation
self.module = None
self.kernel = None
def name(self):
return self.operation.procedural_name()
def emit(self):
return ""
def can_implement(self, configuration, arguments):
raise NotImplementedError()
def get_host_workspace_size(self, arguments):
raise NotImplementedError()
def get_device_workspace_size(self, arguments):
raise NotImplementedError()
def plan(self, arguments):
raise NotImplementedError()
def initialize(self, host_workspace, device_workspace, launch_config, arguments, stream=cuda.CUstream(0)):
raise NotImplementedError()
def run_with_clusters(self, launch_config, kernel_params, stream=cuda.CUstream(0)):
if hasattr(self.operation, "tile_description") and hasattr(self.operation.tile_description, "cluster_shape"):
attr = cuda.CUlaunchAttribute()
attr.value.clusterDim.x, attr.value.clusterDim.y, attr.value.clusterDim.z = self.operation.tile_description.cluster_shape
attr.id = cuda.CUstreamAttrID.CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION
attrs = [attr]
# Allow for non-portable cluster sizes
err, = cuda.cuFuncSetAttribute(
self.kernel, cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED, 1)
if err != cuda.CUresult.CUDA_SUCCESS:
return err
else:
attrs = []
config = cuda.CUlaunchConfig()
config.gridDimX, config.gridDimY, config.gridDimZ = launch_config.grid
config.blockDimX, config.blockDimY, config.blockDimZ = launch_config.block
config.blockDimZ = launch_config.block[2]
config.sharedMemBytes = launch_config.shared_memory_capacity
config.hStream = stream
config.attrs = attrs
config.numAttrs = len(attrs)
err, = cuda.cuLaunchKernelEx(
config, f=self.kernel, kernelParams=kernel_params, extra=0)
return err
def run_without_clusters(self, launch_config, kernel_params, stream=cuda.CUstream(0)):
err, = cuda.cuLaunchKernel(
self.kernel,
launch_config.grid[0], launch_config.grid[1], launch_config.grid[2],
launch_config.block[0], launch_config.block[1], launch_config.block[2],
launch_config.shared_memory_capacity,
stream,
kernel_params,
0)
return err
def run(self, host_workspace, device_workspace, launch_config, stream=cuda.CUstream(0)):
cArg = (ctypes.c_char * len(host_workspace)).from_buffer(host_workspace)
packed = (ctypes.c_void_p * 1)()
packed[0] = ctypes.addressof(cArg)
if supports_cluster_launch():
return self.run_with_clusters(launch_config, packed, stream)
else:
return self.run_without_clusters(launch_config, packed, stream)
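# Usage sketch (illustrative): concrete operations subclass ExecutableOperation and implement
# plan()/initialize(); a caller then proceeds roughly as follows, where `op`, `arguments` and
# the workspaces are hypothetical placeholders.
#
#   launch_config = op.plan(arguments)
#   op.initialize(host_workspace, device_workspace, launch_config, arguments, stream)
#   err = op.run(host_workspace, device_workspace, launch_config, stream)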
| python/cutlass/backend/operation.py/0 | {
"file_path": "python/cutlass/backend/operation.py",
"repo_id": "python",
"token_count": 2048
} | 40 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Base operation used for defining high-level CUTLASS operations (e.g., GEMM, Conv2d)
"""
from bisect import bisect_left
from cutlass_library import (
DataType,
DataTypeSize,
MathOperation,
OperationKind,
SharedMemPerCC
)
import cutlass
from cutlass import get_option_registry
from cutlass.backend.evt import EpilogueFunctorVisitor
from cutlass.backend.utils.device import device_cc
from cutlass.epilogue import get_activations, get_activation_epilogue, identity
from cutlass.library_defaults import KernelsForDataType, _generator_ccs
from cutlass.swizzle import get_swizzling_functors
from cutlass.utils import datatypes, check
class OperationBase:
"""
Base operation used for defining high-level CUTLASS operations (e.g., GEMM, Conv2d)
"""
def __init__(self, cc: int = None, kernel_cc: int = None, operation_kind = OperationKind.Gemm):
"""
:param cc: compute capability of device for which kernels should be compiled. For example, if running on H100, this should be set to 90
:type cc: int
:param kernel_cc: compute capability of kernels to generate. For example, if running on SM90, but desiring to use a CUTLASS 2.x-style Ampere kernel, this should be set to 80
:type kernel_cc: int
:param operation_kind: class of operation that will be performed (e.g., GEMM, Conv)
:type operation_kind: cutlass_library.OperationKind
"""
self.operation_kind = operation_kind
self.cc = cc if cc is not None else device_cc()
self.specified_kernel_cc = kernel_cc is not None
self.current_cc = kernel_cc if kernel_cc is not None else self._find_closest_cc(self.cc)
self.tile_description = None
self._math_operation = None
self.options = get_option_registry().options_for_cc(self.current_cc, operation_kind)
if self.options is None:
raise Exception(f"Invalid or unsupported compute capability: {self.current_cc}")
# Default activation function: identity
self._activation = identity
def _find_closest_cc(self, cc: int) -> int:
"""
Returns the closest CC in _generator_ccs less than or equal to `cc`
:param cc: compute capability to query
:type cc: int
:returns: closest CC in _generator_ccs less than or equal to `cc`
:rtype: int
"""
if cc in _generator_ccs:
return cc
# Find closest CC lower than this CC
idx = bisect_left(_generator_ccs, cc)
if idx == 0:
raise Exception(f'No valid CC to fall back to for {cc}')
return _generator_ccs[idx-1]
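# Illustrative example: assuming _generator_ccs contains 80 and 90 but not 86,
# _find_closest_cc(86) falls back to 80, while _find_closest_cc(90) returns 90 directly.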
def activations(self) -> list:
"""
Returns possible activation functions that can be used
:return: list of activation functions that can be used
:rtype: list
"""
return get_activations()
def swizzling_functors(self) -> list:
"""
Returns possible swizzling functions that can be used
:return: list of swizzling functions that can be used
:rtype: list
"""
return get_swizzling_functors()
def _reset_options(self, cc: int):
"""
Resets the kernel options based on cc
:param cc: compute capability to reset to
:type cc: int
"""
if cc != self.current_cc:
if cc not in _generator_ccs:
raise Exception(f'Invalid CC for CUTLASS kernels: {cc}.')
self.current_cc = cc
self.options = get_option_registry().options_for_cc(self.current_cc, self.operation_kind)
def _verify_scalar(self, scalar, ref_scalar, ref_dtype, name):
"""
Verifies the following properties:
1) Either ``scalar`` or ``ref_scalar`` must be set (i.e., not ``None``)
2) If ``scalar`` is not ``None``, its datatype must match the current version
set by the plan (i.e., that in ``ref_dtype``)
If either of these properties does not hold, an exception is raised. If these properties hold and
``scalar`` is not ``None``, ``scalar`` is returned. Otherwise, ``ref_scalar`` is returned.
:param scalar: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in
:type scalar: numpy/cupy/torch scalar
:param ref_scalar: object representing a tensor passed in on construction of this object, or ``None`` if no tensor was passed in
:type ref_scalar: numpy/cupy/torch scalar
:param ref_dtype: data type for the scalar that this object was initialized to
:param name: identifier of the scalar to verify. Used in raising exceptions
:type name: str
:return: valid scalar to use
:rtype: numpy/cupy/torch scalar
"""
if scalar is None:
if ref_scalar is None:
raise Exception(f"Scalar {name} must be set.")
return ref_scalar
if hasattr(scalar, "dtype"):
dtype = datatypes.library_type(scalar.dtype)
if dtype != ref_dtype:
raise Exception(
f"Tensor {name} with type {dtype} does not match expected type {ref_dtype}."
)
return scalar
def _verify_tensor(self, tensor, ref_tensor, ref_dtype, ref_layout, name):
"""
Verifies the following properties:
If ref_dtype is not void:
1) Either ``tensor`` or ``ref_tensor`` must be set (i.e., not ``None``)
2) If ``tensor`` is not ``None``, its datatype and layout must match the current versions
set by the plan (i.e., those in ``ref_dtype`` and ``ref_layout``)
If ref_dtype is void:
Neither ``tensor`` nor ``ref_tensor`` are set
If either of these properties does not hold, an exception is raised. If these properties hold and
``tensor`` is not ``None``, ``tensor`` is returned. Otherwise, ``ref_tensor`` is returned.
:param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in
:type tensor: numpy/cupy/torch array/tensor object
:param ref_tensor: object representing a tensor passed in on construction of this object, or ``None`` if no tensor was passed in
:type ref_tensor: numpy/cupy/torch array/tensor object
:param ref_dtype: data type for the tensor that this object was initialized to
:param ref_layout: layout for the tensor that this object was initialized to
:param name: identifier of the tensor to verify. Used in raising exceptions
:type name: str
:return: valid tensor object to use
:rtype: numpy/cupy/torch array/tensor object
"""
if ref_dtype == DataType.void:
if tensor is not None or ref_tensor is not None:
raise Exception("Operands with element DataType.void must not be provided a tensor")
return None
if tensor is None:
if ref_tensor is None:
raise Exception(f"Tensor {name} must be set.")
return ref_tensor
self._verify_type_and_layout(tensor, ref_dtype, ref_layout, name)
return tensor
@property
def opclass(self) -> cutlass.OpcodeClass:
"""
Returns the opcode class currently in use
:return: opcode class currently in use
:rtype: cutlass.OpcodeClass
"""
return self.op_class
@opclass.setter
def opclass(self, oc: cutlass.OpcodeClass):
if isinstance(oc, str):
oc = datatypes.getattr_enum(cutlass.OpcodeClass, oc)
if oc in self.possible_op_classes:
self.op_class = oc
else:
raise Exception(
f'Unsupported operation class {oc} for CC {self.cc} and data type combination '
f'({self._element_a}, {self._element_b}, {self._element_accumulator}) and '
f'layout combination ({self._layout_a}, {self._layout_b}).')
# Changing the op class also changes the possible operations available. Reset these.
self.possible_operations = self.options.operations(
self.op_class, self._element_a, self._element_b,
self._element_accumulator, self._layout_a, self._layout_b, self._math_operation)
# Changing the op class changes the elements per access in the epilogue. Reset this.
if self.epilogue_functor is not None:
self.epilogue_functor = self._reset_epilogue_functor_alignment(self._elements_per_access(), self.epilogue_functor)
@property
def math_operation(self) -> cutlass.MathOperation:
"""
Returns the math operation currently in use
:return: math operation currently in use
:rtype: cutlass.MathOperation
"""
return self._math_operation
@math_operation.setter
def math_operation(self, mo: cutlass.MathOperation):
if isinstance(mo, str):
mo = datatypes.getattr_enum(cutlass.MathOperation, mo)
if not self.specified_kernel_cc:
if self.current_cc == 90:
# CUTLASS 3.0 kernels do not use different math operations. If one is specified, we
# revert to using a CUTLASS 2.x kernel by using SM80-tagged kernels.
cutlass.logger.warning("Reverting to using SM80-tagged kernel. Opclass may change.")
self._reset_options(80)
self._reset_operations(reset_epilogue=False)
elif self.current_cc == 90:
raise Exception("CUTLASS 3.0 kernels do not use different math operations. "
"To use 2.x kernels with a specific math operation, do not set the `kernel_cc`"
"parameter when constructing the plan.")
self._math_operation = mo
self._reset_operations()
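# Illustrative usage (a sketch assuming a constructed plan object named `plan`): either the
# enum value or its string name is accepted; on SM90 this reverts to SM80-tagged 2.x kernels.
#   plan.math_operation = cutlass.MathOperation.multiply_add_fast_bf16
#   plan.math_operation = "multiply_add_fast_bf16"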
def _elements_per_access(self):
if self.op_class == cutlass.OpcodeClass.Simt:
return 1
elif self._element_c != DataType.void:
return 128 // DataTypeSize[self._element_c]
else:
return 128 // max(self.possible_operations.alignments("C"))
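# Worked example (illustrative values): for SIMT kernels the result is 1; for a tensor-op kernel
# with an f16 C operand (16 bits per element), elements per 128-bit access = 128 // 16 = 8.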
def _create_epilogue_functor_activation(self, activation):
"""
Returns the epilogue functor with given activation function
"""
if self.epilogue_functor is None:
elements_per_access = self._elements_per_access()
else:
elements_per_access = self.epilogue_functor.epilogue_vector_length
if not self.specified_kernel_cc:
if self.current_cc == 90 and activation != identity:
# CUTLASS 3.0 kernels in Python currently only support identity activation. If one requests a non-identity activation,
# revert to using a CUTLASS 2.x kernel by using SM80-tagged kernels.
cutlass.logger.warning("Reverting to using SM80-tagged kernel. Opclass may change.")
if self._element_c != self._element_d:
raise Exception("CUTLASS 2.x kernels require element C to be the same as element D")
self._reset_options(80)
self._reset_operations(reset_epilogue=False)
elif (self.cc == 90 and self.current_cc != 90 and activation == identity and self._math_operation is None):
# SM80 fallback kernels are currently used. Since an identity activation is requested,
# we can switch back to using SM90 kernels.
self._reset_options(90)
self._reset_operations(reset_epilogue=False)
else:
if self.current_cc == 90 and activation != identity:
raise Exception("Epilogues with elementwise fusion are not currently supported "
"in the Python interface for 3.x kernels. To use 2.x kernels "
"with fused elementwise epilogues, do not set the `kernel_cc` "
"parameter when constructing the plan.")
return get_activation_epilogue(
activation,
self._element_d,
elements_per_access,
self._element_accumulator,
self._element_accumulator,
)
def _reset_epilogue_functor_activation(self, activation):
"""
Set the epilogue functor based on the provided activation function
"""
self.epilogue_functor = self._create_epilogue_functor_activation(activation)
def _reset_epilogue_functor_alignment(self, alignment, epilogue_functor):
"""
Reset the alignment of the current epilogue functor based on alignment C
"""
if isinstance(epilogue_functor, EpilogueFunctorVisitor):
return epilogue_functor
if epilogue_functor is None or not hasattr(epilogue_functor, 'activation_functor'):
# Identity epilogue does not have 'activation_functor'
activation = identity
else:
activation = epilogue_functor.activation_functor
epilogue_functor = get_activation_epilogue(
activation,
self._element_d,
alignment,
self._element_accumulator,
self._element_accumulator,
)
return epilogue_functor
@property
def activation(self):
"""
Returns the type of the current activation function used
"""
if hasattr(self.epilogue_functor, "activation_functor"):
return self.epilogue_functor.activation_functor
else:
return identity
@activation.setter
def activation(self, act):
"""
Sets the type of the activation function to use
Activation can come with a set of arguments
:param act: type of activation function to use
:type act: str or tuple. e.g. "relu", ("leaky_relu", 0.01)
"""
if isinstance(act, tuple):
if isinstance(act[0], str):
act_fn = getattr(cutlass.backend.epilogue, act[0])
else:
act_fn = act[0]
self._reset_epilogue_functor_activation(act_fn)
self._activation_args = act[1]
self._activation = act[0]
else:
if isinstance(act, str):
act = getattr(cutlass.backend.epilogue, act)
self._reset_epilogue_functor_activation(act)
self._activation = act
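# Illustrative usage (a sketch assuming a constructed plan object named `plan`): an activation
# may be given by name, by functor, or as a (name, args) tuple for parameterized activations.
#   plan.activation = "relu"
#   plan.activation = ("leaky_relu", 0.01)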
@property
def epilogue_visitor(self):
"""
Return the epilogue functor
"""
return self.epilogue_functor
@epilogue_visitor.setter
def epilogue_visitor(self, visitor):
"""
Create the epilogue visitor
"""
self.epilogue_functor = EpilogueFunctorVisitor(self.cc, visitor)
# The epilogue_functor may consume too much shared memory
# Reset the possible operations
if self.cc != 90:
# The shared memory is only a concern for sm90 epilogue
# In sm80, the epilogue and mainloop share the shared memory
return
datatype_comb = self.possible_operations.datatype_comb
layout_comb = self.possible_operations.layout_comb
new_possible_operations = KernelsForDataType(datatype_comb, layout_comb)
for operation in self.possible_operations.all_operations:
td = datatypes.td_from_profiler_op(operation)
# Filter invalid epilogue schedules
if td.epilogue_schedule not in [
cutlass.EpilogueScheduleType.TmaWarpSpecialized,
cutlass.EpilogueScheduleType.TmaWarpSpecializedCooperative]:
continue
epilogue_smem_bytes = self.epilogue_functor.get_smem_size(td)
# Verify the maximum number of mainloop stages
mainloop_smem_per_stage = check.calculate_smem_usage_per_stage(td, OperationKind.Gemm)
smem_capacity_bytes = SharedMemPerCC[self.cc] << 10
mainloop_stages = (smem_capacity_bytes - epilogue_smem_bytes) // mainloop_smem_per_stage
if mainloop_stages < 2:
# Mainloop stages must >= 2
continue
new_possible_operations.add(operation)
if len(new_possible_operations.all_operations) == 0:
raise RuntimeError(
"The epilogue consumes too much shared memory. "
"No valid tile description is found in the generator.")
self.possible_operations = new_possible_operations
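# Worked example with illustrative numbers (not tied to a specific architecture): if shared
# memory capacity is 224 KiB (229376 bytes), the visitor epilogue needs 32 KiB (32768 bytes),
# and one mainloop stage needs 48 KiB (49152 bytes), then
#   mainloop_stages = (229376 - 32768) // 49152 = 4,
# which satisfies the >= 2 requirement, so that operation is kept.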
def run_setup(self):
"""
Steps that must be taken before calling `plan.run()`
"""
# Initialize the memory pool, if not already done
cutlass.get_memory_pool()
| python/cutlass/op/op.py/0 | {
"file_path": "python/cutlass/op/op.py",
"repo_id": "python",
"token_count": 7578
} | 41 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for emitting RankK kernels
"""
import enum
import functools
import operator
import os.path
import shutil
try:
import builtins
if hasattr(builtins, "CUTLASS_IGNORE_PACKAGE") and CUTLASS_IGNORE_PACKAGE == True:
raise ImportError("Disabling attempt to import cutlass_library")
from cutlass_library.library import *
except ImportError:
from library import *
###################################################################################################
#
# Data structure modeling a Rank K update operation
#
###################################################################################################
#
class RankKOperation:
#
def __init__(self, rank_k_kind, arch, tile_description, A, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \
blas_mode = BlasMode.symmetric):
self.blas_mode = blas_mode
self.operation_kind = OperationKind.RankK
self.arch = arch
self.tile_description = tile_description
self.rank_k_kind = rank_k_kind
self.A = A
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_mixed_input(self):
return False
#
def is_planar_complex(self):
return False
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {
MathOperation.xor_popc: 'xor',
MathOperation.and_popc: 'and'
}
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp:
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ''
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
inst_shape += math_op_string
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
operation_name = 'syrk' if self.blas_mode == BlasMode.symmetric else 'herk'
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, operation_name)
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.is_complex():
extended_name = "${core_name}"
else:
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)]
)
return "%s" % (ShortLayoutTypeNames[self.A.layout])
#
def fill_mode_name(self):
return "%s" % (ShortFillModeNames[self.C.fill_mode])
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
alignment = max([self.A.alignment, self.C.alignment])
return SubstituteTemplate(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${fill_mode}_align${alignment}",
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'fill_mode': self.fill_mode_name(),
'alignment': "%d" % self.A.alignment,
}
)
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
return self.procedural_name()
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
#
class EmitRankKUniversalInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self):
self.rank_k_template = """
// Rank K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::RankK<
${element_a}, ${layout_a},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${split_k_serial},
${math_operation}
>;
"""
self.rank_k_complex_template = """
// Rank K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::RankK<
${element_a}, ${layout_a},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${split_k_serial},
${math_operation},
${transform_a},
${blas_mode}
>;
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
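# Worked example (illustrative values): threadblock_shape = [128, 128, 32] with
# warp_count = [2, 2, 1] gives warp_shape = [64, 64, 32]; for an f16 C operand
# (16 bits) with alignment 8, epilogue_vector_length = min(8 * 16, 128) / 16 = 8.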
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'fill_mode': FillModeTag[operation.C.fill_mode],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'split_k_serial': 'false',
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'blas_mode': BlasModeTag[operation.blas_mode]
}
rank_k_template = self.rank_k_complex_template if operation.is_complex() else self.rank_k_template
return SubstituteTemplate(rank_k_template, values)
###################################################################################################
###################################################################################################
#
# Emitters functions for all targets
#
###################################################################################################
class EmitRankKConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/')
self.instance_emitter = {
RankKKind.Universal: EmitRankKUniversalInstance,
}
self.rank_k_kind_wrappers = {
RankKKind.Universal: 'RankKOperation',
}
self.instance_template = {
RankKKind.Universal: """
${compile_guard_start}
manifest.append(new ${rank_k_kind}<
Operation_${operation_name}
>("${operation_name}"));
${compile_guard_end}
"""
}
self.header_template = """
/*
Generated by rank_k_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "rank_k_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.initialize_function_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.epilogue_template = """
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(self.header_template)
self.instance_definitions = []
self.instance_wrappers = []
self.operations = []
return self
def emit(self, operation):
emitter = self.instance_emitter[operation.rank_k_kind]()
self.operations.append(operation)
self.instance_definitions.append(emitter.emit(operation))
self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.rank_k_kind], {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'rank_k_kind': self.rank_k_kind_wrappers[operation.rank_k_kind],
'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "",
'compile_guard_end': "#endif" \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else ""
}))
def __exit__(self, exception_type, exception_value, traceback):
# Write instance definitions in top-level namespace
for instance_definition in self.instance_definitions:
self.configuration_file.write(instance_definition)
# Add wrapper objects within initialize() function
self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, {
'configuration_name': self.configuration_name
}))
for instance_wrapper in self.instance_wrappers:
self.configuration_file.write(instance_wrapper)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
###################################################################################################
| python/cutlass_library/rank_k_operation.py/0 | {
"file_path": "python/cutlass_library/rank_k_operation.py",
"repo_id": "python",
"token_count": 5264
} | 42 |
{
"path": "./../../../../examples/python/02_pytorch_extension_grouped_gemm.ipynb"
}
| python/docs_src/source/externals/02_pytorch_extension_grouped_gemm.nblink/0 | {
"file_path": "python/docs_src/source/externals/02_pytorch_extension_grouped_gemm.nblink",
"repo_id": "python",
"token_count": 39
} | 43 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utility functions for Conv2d tests.
"""
from cutlass_library import SubstituteTemplate
import torch
import cutlass
from cutlass_library import (
ConvKind,
ConvMode,
DataType,
DataTypeNames,
EpilogueScheduleSuffixes,
KernelScheduleSuffixes,
LayoutType,
OpcodeClassNames,
ShortDataTypeNames,
ShortLayoutTypeNames,
SplitKMode,
)
from cutlass.shape import Conv2DProblemSize
from cutlass.utils.datatypes import numpy_type, torch_type
from conv2d_problem_sizes import TestbedConv2dProblemSizes
def get_name_conv2d(
arch,
conv_kind,
element,
element_accumulator,
element_output,
opclass,
threadblock_shape,
warp_count,
instruction_shape,
stages,
iterator_algorithm,
swizzle,
split_k_mode,
split_k_slices,
activation
):
"""
Generates a procedural name for a test case for conv2d
:param arch: compute capability of kernel being generated
:type arch: int
:param conv_kind: the convolution type (i.e. fprop, dgrad, wgrad)
:type conv_kind: str
:param iterator_algorithm: the iterator algorithm applied
:type iterator_algorithm: cutlass_library.library.IteratorAlgorithm
:param element: data type of operands A and B
:param element_accumulator: data type used in accumulation
:param element_output: data type of the output
:param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
:type opclass: cutlass.OpcodeClass
:param threadblock_shape: indexable container of dimensions of threadblock tiles
:param stages: number of pipeline stages to use in the kernel
:type stages: int
:param swizzle: threadblock swizzling stride, or ``None`` to use the kernel default
:param split_k_mode: split-K mode to use (e.g., "serial" or "parallel")
:param split_k_slices: number of split-K slices
:param activation: name of the activation function applied in the epilogue
:return: procedural name for the test case
:rtype: str
"""
if iterator_algorithm is None:
iterator_algorithm = "AUTO"
if swizzle is None:
swizzle = 1
name_format = "test_SM${arch}_Device_Conv2d_${conv_kind}_${iter_alg}_ImplicitGemm_${eA}nhwc_${eB}nhwc_${eC}nhwc_${opclass}_${acc}_${tbM}x${tbN}x${tbK}_${wM}x${wN}x${wK}_${IM}${IN}${IK}_stage${stages}_swizzle${swizzle}_${split_k_mode}${split_k_slices}_${activation}"
return SubstituteTemplate(
name_format,
{
"arch": str(arch),
"conv_kind": conv_kind,
"iter_alg": iterator_algorithm,
"eA": DataTypeNames[element],
"eB": DataTypeNames[element],
"eC": DataTypeNames[element_output],
"opclass": opclass,
"acc": DataTypeNames[element_accumulator],
"tbM": str(threadblock_shape[0]),
"tbN": str(threadblock_shape[1]),
"tbK": str(threadblock_shape[2]),
"wM": str(threadblock_shape[0] // warp_count[0]),
"wN": str(threadblock_shape[1] // warp_count[1]),
"wK": str(threadblock_shape[2] // warp_count[2]),
"IM": str(instruction_shape[0]),
"IN": str(instruction_shape[1]),
"IK": str(instruction_shape[2]),
"stages": str(stages),
"swizzle": str(swizzle),
"split_k_mode": split_k_mode,
"split_k_slices": str(split_k_slices),
"activation": activation
}
)
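# Illustrative call (hypothetical argument values), e.g. an SM80 f16 tensor-op fprop test:
#   name = get_name_conv2d(
#       80, "fprop", cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
#       "tensor_op", [128, 128, 64], [2, 2, 1], [16, 8, 16], 3,
#       None, None, "serial", 1, "identity")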
def conv2d_few_channel_problemsizes(channels):
problem_sizes = [
Conv2DProblemSize(
1, 8, 8, channels,
16, 3, 3, channels,
1, 1,
2, 2,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
Conv2DProblemSize(
1, 16, 16, channels,
16, 3, 3, channels,
1, 1,
2, 2,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
Conv2DProblemSize(
1, 16, 16, channels,
16, 7, 7, channels,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
Conv2DProblemSize(
1, 224, 224, channels,
32, 7, 7, channels,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
Conv2DProblemSize(
1, 224, 224, channels,
64, 7, 7, channels,
1, 1,
2, 2,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
Conv2DProblemSize(
1, 224, 224, channels,
64, 5, 5, channels,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
Conv2DProblemSize(
1, 224, 224, channels,
64, 5, 5, channels,
1, 1,
2, 2,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
]
return problem_sizes
def validate_problem_size(ps, conv_kind, split_k_slices):
P = (ps.H + 2 * ps.pad_h - ps.dilation_h * (ps.R - 1) - 1) // ps.stride_h + 1
Q = (ps.W + 2 * ps.pad_w - ps.dilation_w * (ps.S - 1) - 1) // ps.stride_w + 1
if P != ps.P or Q != ps.Q:
return False
# Split-K (serial or parallel) is not supported for strided dgrad
if conv_kind == "dgrad" and split_k_slices > 1 and (ps.stride_h > 1 or ps.stride_w > 1):
return False
return True
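# Worked example (illustrative values): for H = W = 8, pad = 1, dilation = 1, R = S = 3,
# stride = 2, the expected output extent is P = Q = (8 + 2 - 2 - 1) // 2 + 1 = 4; a problem
# whose stated P or Q differs from this is rejected.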
class Conv2dLauncherFrontend:
def __init__(self, plan: cutlass.Conv2d, seed: int = 80, backend="numpy"):
self.operation = plan
self.conv_kind = plan.conv_kind
self.seed = seed
self.backend = backend
self.dtype_A = plan._element_a
self.dtype_B = plan._element_b
self.dtype_C = plan._element_c
self.dtype_acc = plan._element_accumulator
self.layout_A = LayoutType.TensorNHWC
self.layout_B = LayoutType.TensorNHWC
self.layout_C = LayoutType.TensorNHWC
self.layout_D = LayoutType.TensorNHWC
self.element_compute = DataType.f32
if self.dtype_A in [cutlass.DataType.f16, cutlass.DataType.bf16]:
self.rand_max = 1
else:
self.rand_max = 4
self.activation = plan.activation
def uniform_init(self, size, dtype):
tensor = torch.ceil(
torch.empty(size=size, dtype=torch_type(dtype), device="cuda").uniform_(-self.rand_max - 0.5, self.rand_max - 0.5)
).to(memory_format=torch.channels_last)
return tensor
def reference(self, ps, A, B, C, alpha, beta, activation):
if self.conv_kind == ConvKind.Fprop:
torch_result = alpha * torch.ops.aten.conv2d(
A,
B,
stride=(ps.stride_h, ps.stride_w),
padding=(ps.pad_h, ps.pad_w),
dilation=(ps.dilation_h, ps.dilation_w)
) + beta * C
elif self.conv_kind == ConvKind.Dgrad:
torch_result = alpha * torch.nn.grad.conv2d_input(
(ps.N, ps.C, ps.H, ps.W),
B,
A,
padding=(ps.pad_h, ps.pad_w),
stride=(ps.stride_h, ps.stride_w)
) + beta * C
elif self.conv_kind == ConvKind.Wgrad:
torch_result = alpha * torch.nn.grad.conv2d_weight(
B,
(ps.K, ps.C, ps.R, ps.S),
A,
padding=(ps.pad_h, ps.pad_w),
stride=(ps.stride_h, ps.stride_w)
) + beta * C
else:
raise Exception(f"Conv kind {self.conv_kind} is currently unsupported.")
if activation == cutlass.backend.epilogue.relu:
torch_result = torch.nn.functional.relu(torch_result)
elif activation == cutlass.backend.epilogue.leaky_relu:
torch_result = torch.nn.functional.leaky_relu(torch_result, 0.5)
return torch_result
def run(self, ps, split_k_mode=SplitKMode.Serial, split_k_slices=1, alpha=1.0, beta=0.0):
if self.conv_kind == ConvKind.Fprop:
tensor_A_size = (ps.N, ps.C, ps.H, ps.W)
tensor_B_size = (ps.K, ps.C, ps.R, ps.S)
tensor_C_size = (ps.N, ps.K, ps.P, ps.Q)
elif self.conv_kind == ConvKind.Dgrad:
tensor_A_size = (ps.N, ps.K, ps.P, ps.Q)
tensor_B_size = (ps.K, ps.C, ps.R, ps.S)
tensor_C_size = (ps.N, ps.C, ps.H, ps.W)
elif self.conv_kind == ConvKind.Wgrad:
tensor_A_size = (ps.N, ps.K, ps.P, ps.Q)
tensor_B_size = (ps.N, ps.C, ps.H, ps.W)
tensor_C_size = (ps.K, ps.C, ps.R, ps.S)
else:
raise Exception(f"Conv kind {self.conv_kind} is not supported")
torch.manual_seed(self.seed)
tensor_A = self.uniform_init(size=tensor_A_size, dtype=self.dtype_A)
tensor_B = self.uniform_init(size=tensor_B_size, dtype=self.dtype_B)
tensor_C = self.uniform_init(size=tensor_C_size, dtype=self.dtype_C)
tensor_D = torch.zeros_like(tensor_C).to(memory_format=torch.channels_last)
args = self.operation.run(tensor_A, tensor_B, tensor_C, tensor_D,
stride=(ps.stride_h, ps.stride_w),
padding=(ps.pad_h, ps.pad_w),
dilation=(ps.dilation_h, ps.dilation_w),
alpha=alpha, beta=beta,
split_k=(split_k_mode, split_k_slices))
args.sync()
tensor_D_ref = self.reference(ps, tensor_A, tensor_B, tensor_C, alpha, beta, self.activation)
torch.cuda.synchronize()
passed = torch.allclose(tensor_D, tensor_D_ref, atol=2e-06)
return passed
def add_test(
cls,
cc,
conv_kind,
problem_sizes,
element,
element_accumulator,
element_output,
opclass,
threadblock_shape,
warp_count,
instruction_shape,
stages,
iterator_algorithm=None,
swizzle=None,
split_k_mode="serial",
split_k_slices=1,
activation = "identity"
):
"""Create a test-running function with the given specification"""
test_name = get_name_conv2d(
cc, conv_kind, element, element_accumulator,
element_output, opclass, threadblock_shape, warp_count, instruction_shape, stages,
iterator_algorithm, swizzle, split_k_mode, split_k_slices, activation)
def run(self):
# Create the plan
plan = cutlass.Conv2d(
kind=conv_kind,
element=element,
element_accumulator=element_accumulator,
element_C=element_output,
element_D=element_output
)
# Set the opclass
plan.opclass = opclass
# Set the tile description
td = {
"threadblock_shape": threadblock_shape,
"warp_count": warp_count,
"stages": stages,
"instruction_shape": instruction_shape,
}
plan.tile_description = td
# Set iterator algorithm
if iterator_algorithm is not None:
plan.iterator_algorithm = iterator_algorithm
# Set swizzling functor
if swizzle is not None:
plan.swizzling_stride = swizzle
if activation != "identity":
if activation == "leaky_relu":
plan.activation = (cutlass.epilogue.leaky_relu, 0.5)
else:
plan.activation = getattr(cutlass.epilogue, activation)
conv2d_launcher = Conv2dLauncherFrontend(plan, 80, backend="torch")
for ps in problem_sizes:
if not validate_problem_size(ps, conv_kind, split_k_slices):
continue
self.assertTrue(conv2d_launcher.run(ps, split_k_mode, split_k_slices, 1.0, 2.0))
setattr(cls, test_name, run)
return run
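# Illustrative registration (hypothetical class and argument values): attaches a generated
# test method to the given unittest class.
#   add_test(
#       Conv2dSm80FpropTest, 80, "fprop", get_conv_problems(),
#       cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
#       "tensor_op", [128, 128, 64], [2, 2, 1], [16, 8, 16], stages=3,
#       activation="relu")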
def get_conv_problems():
# 64: minimum channel size
conv_problems = TestbedConv2dProblemSizes(64).all
# Insert alignment 4 & 2 tests
conv_problems += [
Conv2DProblemSize(
1, 4, 4, 12,
8, 3, 3, 12,
0, 0,
3, 3,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
Conv2DProblemSize(
1, 4, 4, 14,
8, 3, 3, 14,
0, 0,
3, 3,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
Conv2DProblemSize(
1, 23, 56, 98,
128, 3, 3, 98,
4, 5,
3, 3,
1, 1,
ConvMode.CrossCorrelation,
1, 1
),
]
return conv_problems
| test/python/cutlass/conv2d/conv2d_test_utils.py/0 | {
"file_path": "test/python/cutlass/conv2d/conv2d_test_utils.py",
"repo_id": "test",
"token_count": 6885
} | 44 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implicit GEMM testbed sizes for Conv2d problem
*/
#pragma once
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
namespace test {
namespace conv {
namespace device {
using Conv2dProblemVector = std::vector<cutlass::conv::Conv2dProblemSize>;
//
// Structures to prune items from Conv2dProblemVector
//
// Specification template for pruning items for convolution problem lists
template <typename T> struct Specification
{
virtual ~Specification() = default;
virtual bool is_satisfied(T item) const = 0;
};
// input size (NHWC) specification
struct InputSizeSpecification : Specification<cutlass::conv::Conv2dProblemSize>
{
cutlass::Tensor4DCoord input_size;
InputSizeSpecification(cutlass::Tensor4DCoord input_size_) : input_size(input_size_) {}
bool is_satisfied(cutlass::conv::Conv2dProblemSize item) const override {
return ((input_size.n() == item.N) && (input_size.h() == item.H) && (input_size.w() == item.W) && (input_size.c() == item.C));
}
};
// stride (stride_h, stride_w) specification
struct StrideSpecification : Specification<cutlass::conv::Conv2dProblemSize>
{
cutlass::MatrixCoord stride;
StrideSpecification(cutlass::MatrixCoord stride_) : stride(stride_) {}
bool is_satisfied(cutlass::conv::Conv2dProblemSize item) const override {
return ((stride.row() == item.stride_h) && (stride.column() == item.stride_w));
}
};
// channel (C,K) specification, must be multiple of minimum channel
struct ChannelDivisibilitySpecification : Specification<cutlass::conv::Conv2dProblemSize>
{
int channel_multiple;
ChannelDivisibilitySpecification(int channel_multiple_) : channel_multiple(channel_multiple_) {}
bool is_satisfied(cutlass::conv::Conv2dProblemSize item) const override {
return ((item.K % channel_multiple == 0) && (item.C % channel_multiple == 0));
}
};
//
// Pruning function for items from Conv2dProblemVector based on a Specification
//
inline Conv2dProblemVector prune(Conv2dProblemVector const &items,
Specification<cutlass::conv::Conv2dProblemSize> const &spec)
{
Conv2dProblemVector pruned_list;
for (auto& p : items)
if (spec.is_satisfied(p))
pruned_list.push_back(p);
return pruned_list;
}
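// Illustrative usage (hypothetical problem list `problems`): keep only problems whose
// C and K channel counts are multiples of 32, e.g.
//   Conv2dProblemVector pruned = prune(problems, ChannelDivisibilitySpecification(32));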
////////////////////////////////////////////////////////////////////////////
/// Structure TestbedConv2dProblemSizes initializes and holds conv default and
/// important network sizes
////////////////////////////////////////////////////////////////////////////
struct TestbedConv2dProblemSizes {
//
// Data members
//
int minimum_channel_size;
Conv2dProblemVector conv2d_default_sizes;
Conv2dProblemVector conv2d_rigorous_sizes;
Conv2dProblemVector conv2d_resnet50_sizes;
Conv2dProblemVector conv2d_resnet50_sizes_perf;
//
// Methods
//
/// Default ctor
TestbedConv2dProblemSizes(int minimum_channel_size_ = 64): minimum_channel_size (minimum_channel_size_) {
initialize_conv2d_default_sizes();
initialize_conv2d_rigorous_sizes();
initialize_conv2d_resnet50_sizes(conv2d_resnet50_sizes, 1 /*batch-size*/);
initialize_conv2d_resnet50_sizes(conv2d_resnet50_sizes_perf, 34 /*batch-size*/);
filter_all();
}
/// Eliminates some illegal cases
void filter_all() {
Conv2dProblemVector *problems_vectors[] = {
&conv2d_default_sizes,
&conv2d_rigorous_sizes,
&conv2d_resnet50_sizes,
&conv2d_resnet50_sizes_perf
};
for (Conv2dProblemVector *problems : problems_vectors) {
Conv2dProblemVector filtered;
for (cutlass::conv::Conv2dProblemSize const & problem : *problems) {
if (!(problem.C % minimum_channel_size)) {
filtered.push_back(problem);
}
}
*problems = filtered;
}
}
// Add a few standard convolution problem sizes
void initialize_conv2d_default_sizes() {
////////////////////////////////////////////////////////////////////////////////////////////
// Small input size x stride (1,1)
// C < CTA::K and non-multiples of CTA::K. Typical CTA::K = {32, 64}
////////////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 1, 1, minimum_channel_size}, // input size (NHWC)
{8, 1, 1, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 1, 8, minimum_channel_size}, // input size (NHWC)
{8, 1, 3, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 7, 8, minimum_channel_size}, // input size (NHWC)
{8, 3, 3, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 4, 4, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{2, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 5, 5, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{3, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 6, 5, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{3, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 6, 6, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{3, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 7, 7, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
////////////////////////////////////////////////////////////////////////////////////////////
// Small input size x stride (1,1) asymmetric paddings (1, 0, 1, 0)
// C < CTA::K and non-multiples of CTA::K. Typical CTA::K = {32, 64}
////////////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 1, 1, minimum_channel_size}, // input size (NHWC)
{8, 1, 1, minimum_channel_size}, // filter size (KRSC)
{1, 0, 1, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 1, 8, minimum_channel_size}, // input size (NHWC)
{8, 1, 3, minimum_channel_size}, // filter size (KRSC)
{1, 0, 1, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 7, 8, minimum_channel_size}, // input size (NHWC)
{8, 3, 3, minimum_channel_size}, // filter size (KRSC)
{1, 0, 1, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 4, 4, minimum_channel_size}, // filter size (KRSC)
{1, 0, 1, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{2, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 5, 5, minimum_channel_size}, // filter size (KRSC)
{1, 0, 1, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{3, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 6, 5, minimum_channel_size}, // filter size (KRSC)
{1, 0, 1, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{3, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 6, 6, minimum_channel_size}, // filter size (KRSC)
{1, 0, 1, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{3, 7, 9, minimum_channel_size}, // input size (NHWC)
{8, 7, 7, minimum_channel_size}, // filter size (KRSC)
{1, 0, 1, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
////////////////////////////////////////////////////////////////////////////////////////////
// Small input size x stride (2,2)
// C < CTA::K and non-multiples of CTA::K. Typical CTA::K = {32, 64}
////////////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 11, 7, minimum_channel_size}, // input size (NHWC)
{8, 1, 1, minimum_channel_size}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 11, 7, minimum_channel_size}, // input size (NHWC)
{8, 3, 3, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 13, 11, minimum_channel_size}, // input size (NHWC)
{8, 1, 1, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 17, 19, minimum_channel_size}, // input size (NHWC)
{16, 2, 2, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 23, 5, minimum_channel_size}, // input size (NHWC)
{16, 3, 3, minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 13, 17, 8}, // input size (NHWC)
{24, 3, 3, 8}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 23, 21, 8}, // input size (NHWC)
{24, 3, 3, 8}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{3, 3}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 20, 24, 8}, // input size (NHWC)
{40, 3, 3, 8}, // filter size (KRSC)
{3, 3, 3, 3}, // padding (pad_h, _, pad_w, _)
{3, 3}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
////////////////////////////////////////////////////////////////////////////////////
// Medium input size (1x16x16x128), filter size (1x1, 2x2, 3x3, 5x5), stride (1, 1)
////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 15, 19, 160}, // input size (NHWC)
{224, 1, 1, 160}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 19, 37, 160}, // input size (NHWC)
{224, 3, 3, 160}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 16, 16, 160}, // input size (NHWC)
{224, 2, 3, 160}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 23, 21, 128}, // input size (NHWC)
{224, 3, 3, 128}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 29, 37, 160}, // input size (NHWC)
{224, 5, 5, 160}, // filter size (KRSC)
{2, 2, 2, 2}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
////////////////////////////////////////////////////////////////////////////////////
// C > CTA::K and non-multiples of CTA::K. Typical CTA::K = {32, 64}
////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 15, 19, 32 + minimum_channel_size}, // input size (NHWC)
{96, 3, 3, 32 + minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 16, 24, 64 + minimum_channel_size}, // input size (NHWC)
{96, 3, 3, 64 + minimum_channel_size}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
////////////////////////////////////////////////////////////////////////////////////
// Medium input size, filter size (1x1, 3,x3, 5x5, 7x7), stride (2, 2)
////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 13, 16, 288}, // input size (NHWC)
{160, 5, 5, 288}, // filter size (KRSC)
{2, 2, 2, 2}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 55, 51, 256}, // input size (NHWC)
{512, 1, 1, 256}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 71, 80, 32}, // input size (NHWC)
{64, 5, 5, 32}, // filter size (KRSC)
{2, 2, 2, 2}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 224, 224, 8}, // input size (NHWC)
{64, 7, 7, 8}, // filter size (KRSC)
{3, 3, 3, 3}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
////////////////////////////////////////////////////////////////////////////////////
// Medium input size stride (3, 3), filter (3, 3), non-default padding
////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 27, 23, 256}, // input size (NHWC)
{512, 3, 3, 256}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{3, 3}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
////////////////////////////////////////////////////////////////////////////////////
// Medium input size padding > stride, asymmetric filter, padding and striding
////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 27, 31, 256}, // input size (NHWC)
{512, 3, 3, 256}, // filter size (KRSC)
{5, 5, 7, 7}, // padding (pad_h, _, pad_w, _)
{3, 4}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 27, 35, 256}, // input size (NHWC)
{512, 7, 5, 256}, // filter size (KRSC)
{11, 11, 7, 7}, // padding (pad_h, _, pad_w, _)
{3, 5}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
////////////////////////////////////////////////////////////////////////////////////
// Medium input size *mixed* stride (1, 2) and (2, 1),
// filter (3, 3), default padding
////////////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 27, 27, 256}, // input size (NHWC)
{512, 3, 3, 256}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 27, 27, 256}, // input size (NHWC)
{512, 3, 3, 256}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{2, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
/////////////////////////////////////////////////////////////////////////////
// Additional input size
/////////////////////////////////////////////////////////////////////////////
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{3, 28, 28, 256}, // input size (NHWC)
{256, 2, 2, 256}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 32, 32, 16}, // input size (NHWC)
{32, 3, 3, 16}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{6, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{32, 24, 32, 32}, // input size (NHWC)
{32, 1, 2, 32}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{4, 4, 5, 128}, // input size (NHWC)
{256, 3, 6, 128}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
{4, 3, 3, 256} // output size (NPQK)
));
conv2d_default_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{4, 2, 3, 256}, // input size (NHWC)
{328, 3, 5, 256}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
{4, 1, 1, 328} // output size (NPQK)
));
}
// Add a few large and rigorous convolution problem sizes
void initialize_conv2d_rigorous_sizes() {
#if CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED
conv2d_rigorous_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 124, 224, 96}, // input size (NHWC)
{24, 7, 7, 96}, // filter size (KRSC)
{1, 229, 129, 32} // output size (NPQK)
));
conv2d_rigorous_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 233, 35, 48}, // input size (NHWC)
{24, 7, 5, 48}, // filter size (KRSC)
{1, 233, 35, 24} // output size (NPQK)
));
#endif
}
// Add resent50 layers to unit testing sizes
void initialize_conv2d_resnet50_sizes(Conv2dProblemVector &conv2d_problem_vector, int batch_size = 1){
#if 0 // Resnet50 first layer (layer_id = 0) with channel = 3 is not supported in cutlass
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{1, 224, 224, 3}, // input size (NHWC)
{64, 7, 7, 3}, // filter size (KRSC)
{3, 3, 3, 3}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
#endif
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 56, 56, 64}, // input size (NHWC)
{256, 1, 1, 64}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 56, 56, 64}, // input size (NHWC)
{64, 1, 1, 64}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 56, 56, 64}, // input size (NHWC)
{64, 3, 3, 64}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 56, 56, 256}, // input size (NHWC)
{64, 1, 1, 256}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 56, 56, 256}, // input size (NHWC)
{512, 1, 1, 256}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 56, 56, 256}, // input size (NHWC)
{128, 1, 1, 256}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 28, 28, 128}, // input size (NHWC)
{128, 3, 3, 128}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 28, 28, 128}, // input size (NHWC)
{512, 1, 1, 128}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 28, 28, 512}, // input size (NHWC)
{128, 1, 1, 512}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 28, 28, 512}, // input size (NHWC)
{1024, 1, 1, 512}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 28, 28, 512}, // input size (NHWC)
{256, 1, 1, 512}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 14, 14, 256}, // input size (NHWC)
{256, 3, 3, 256}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 14, 14, 256}, // input size (NHWC)
{1024, 1, 1, 256}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 14, 14, 1024}, // input size (NHWC)
{256, 1, 1, 1024}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 14, 14, 1024}, // input size (NHWC)
{2048, 1, 1, 1024}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 14, 14, 1024}, // input size (NHWC)
{512, 1, 1, 1024}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 7, 7, 512}, // input size (NHWC)
{512, 3, 3, 512}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 7, 7, 512}, // input size (NHWC)
{2048, 1, 1, 512}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
conv2d_problem_vector.push_back(cutlass::conv::Conv2dProblemSize(
{batch_size, 7, 7, 2048}, // input size (NHWC)
{512, 1, 1, 2048}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
));
}
};
////////////////////////////////////////////////////////////////////////////
/// Structure TestbedGroupConv2dProblemSizes initializes and holds group conv default and
/// important network sizes
////////////////////////////////////////////////////////////////////////////
struct TestbedGroupConv2dProblemSizes {
//
// Data members
//
int threadblock_n;
int threadblock_k;
int minimum_channel_size;
Conv2dProblemVector default_single_group_sizes;
Conv2dProblemVector default_multiple_group_sizes;
//
// Methods
//
/// Default ctor
TestbedGroupConv2dProblemSizes(
int threadblock_n_,
int threadblock_k_,
int minimum_channel_size_ = 64)
: threadblock_n (threadblock_n_),
threadblock_k (threadblock_k_),
minimum_channel_size (minimum_channel_size_) {
initialize_group_conv2d_default_sizes();
filter_all();
}
/// Eliminates some illegal cases
void filter_all() {
Conv2dProblemVector *problems_vectors[] = {
&default_single_group_sizes,
&default_multiple_group_sizes
};
for (Conv2dProblemVector *problems : problems_vectors) {
Conv2dProblemVector filtered;
for (cutlass::conv::Conv2dProblemSize const & problem : *problems) {
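// Keep only problems whose per-group channel count (C / groups) is a multiple of
// minimum_channel_size; other cases are treated as illegal for this configuration.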
if (!((problem.C / problem.groups) % minimum_channel_size)) {
filtered.push_back(problem);
}
}
*problems = filtered;
}
}
// Add a few standard convolution problem sizes
void initialize_group_conv2d_default_sizes() {
////////////////////////////////////////////////////////////////////////////////////
// One group calculated by one or multiple CTAs: k_per_group % CTA::N = 0
// One CTA calculates a single group
////////////////////////////////////////////////////////////////////////////////////
for (int cta_per_group_k = 1; cta_per_group_k < 4; ++cta_per_group_k) {
// groups = 2, 3, 4
for (int groups = 2; groups < 5; ++groups) {
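// Pick K so that k_per_group = conv_k / groups = cta_per_group_k * threadblock_n,
// i.e. each group's output channels cover a whole number of CTA tiles along N.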
int conv_k = cta_per_group_k * threadblock_n * groups;
default_single_group_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 8, 8, threadblock_k * 2 * groups}, // input size (NHWC)
{conv_k, 3, 3, threadblock_k * 2}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation,
1, // split_k_slices
groups // groups
));
} // loop groups
} // loop cta_per_group_k
// Partial gemm_k: k_per_group == CTA::N && channels_per_group < CTA::K
default_single_group_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 8, 8, threadblock_k}, // input size (NHWC)
{threadblock_n * 2, 3, 3, threadblock_k / 2}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation,
1, // split_k_slices
2 // groups
));
// Larger problem sizes
default_single_group_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 56, 56, 696}, // input size (NHWC)
{768, 3, 3, 232}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation,
1, // split_k_slices
3 // groups
));
default_single_group_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 14, 14, 1392}, // input size (NHWC)
{1536, 3, 3, 232}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation,
1, // split_k_slices
3 // groups
));
////////////////////////////////////////////////////////////////////////////////////
// One CTA calculates multiple groups: CTA::N % k_per_group = 0
////////////////////////////////////////////////////////////////////////////////////
// 2 groups per CTA
default_multiple_group_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 8, 8, threadblock_k * 4}, // input size (NHWC)
{threadblock_n, 3, 3, threadblock_k * 2}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation,
1, // split_k_slices
2 // groups
));
// 2 groups per CTA and partial gemm_k
default_multiple_group_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 8, 8, threadblock_k}, // input size (NHWC)
{threadblock_n, 3, 3, threadblock_k / 2}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation,
1, // split_k_slices
2 // groups
));
// 4 groups per CTA
default_multiple_group_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 8, 8, threadblock_k * 8}, // input size (NHWC)
{threadblock_n / 2, 3, 3, threadblock_k * 2}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation,
1, // split_k_slices
4 // groups
));
// 4 groups per CTA and partial gemm_k
default_multiple_group_sizes.push_back(cutlass::conv::Conv2dProblemSize(
{1, 8, 8, threadblock_k * 2}, // input size (NHWC)
{threadblock_n / 2, 3, 3, threadblock_k / 2}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation,
1, // split_k_slices
4 // groups
));
}
};
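// Example (illustrative): construct the testbed with the kernel's threadblock tile extents, e.g.
//   TestbedGroupConv2dProblemSizes problem_sizes(ThreadblockShape::kN, ThreadblockShape::kK);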
} // namespace device
} // namespace conv
} // namespace test
// End of file: test/unit/conv/device/conv2d_problems.h
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implicit GEMM testbed
*/
#pragma once
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/reduction/device/tensor_reduce.h"
#include "cutlass/reduction/device/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "conv2d_problems.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/core_io.h"
#include "cutlass/util/tensor_view_io.h"
#include "../cache_testbed_output.h"
namespace test {
namespace conv {
namespace device {
template <typename Conv2d>
class TestbedConv2dWithReduction {
public:
using ElementA = typename Conv2d::ElementA;
using LayoutA = typename Conv2d::LayoutA;
using ElementB = typename Conv2d::ElementB;
using LayoutB = typename Conv2d::LayoutB;
using ElementC = typename Conv2d::ElementC;
using LayoutC = typename Conv2d::LayoutC;
using ElementAccumulator = typename Conv2d::ElementAccumulator;
using ElementCompute = typename Conv2d::ElementCompute;
using EpilogueOutputOp = typename Conv2d::EpilogueOutputOp;
using ElementT = typename EpilogueOutputOp::ElementTensor;
static cutlass::conv::Operator const kConvolutionalOperator = Conv2d::kConvolutionalOperator;
public:
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementAccumulator, LayoutC> tensor_Reduction;
cutlass::HostTensor<ElementT, cutlass::layout::RowMajor> tensor_Tensor;
cutlass::HostTensor<ElementAccumulator, LayoutC> tensor_Final_Reduction;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
public:
TestbedConv2dWithReduction(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) {
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
void initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
int scope = 2;
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope, -scope, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(view.data(), view.capacity());
}
else {
}
}
void initialize(
cutlass::conv::Conv2dProblemSize const &problem_size, uint64_t seed = 2019) {
tensor_A.resize(implicit_gemm_tensor_a_extent(kConvolutionalOperator, problem_size));
tensor_B.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size));
tensor_C.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
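// The partial-reduction tensor holds one row of K values per threadblock tile along the
// GEMM M dimension (M = N * P * Q), hence the ceiling division by ThreadblockShape::kM below.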
tensor_Reduction.resize({
1,
1,
(problem_size.N * problem_size.P * problem_size.Q - 1 + Conv2d::ThreadblockShape::kM) / Conv2d::ThreadblockShape::kM,
(problem_size.K)
});
tensor_Final_Reduction.resize({
1,
1,
1,
(problem_size.K)
});
tensor_Tensor.resize({(problem_size.N * problem_size.P * problem_size.Q), problem_size.K});
tensor_D_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_D_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
initialize_tensor(tensor_A.host_view(), init_A, seed);
initialize_tensor(tensor_B.host_view(), init_B, seed * 17);
initialize_tensor(tensor_C.host_view(), init_C, seed * 39);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D_computed.sync_device();
tensor_D_reference.sync_device();
}
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Conv2d::UnderlyingKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::conv::Conv2dProblemSize const &problem_size,
cutlass::conv::SplitKMode const &split_k_mode = cutlass::conv::SplitKMode::kSerial,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
#if 0 //display conv2d problem size for debugging
std::cout << problem_size << std::endl
<< "alpha, beta: (" << alpha << ", " << beta << ")" << std::endl
<< "split_k_mode: " << ((split_k_mode == cutlass::conv::SplitKMode::kSerial) ? "(serial)" : "(parallel)") << std::endl
<< std::endl;
#endif
initialize(problem_size);
// configure the operator
Conv2d conv2d_op;
typename Conv2d::Arguments conv2d_args(
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D_computed.device_ref(),
{alpha, beta},
split_k_mode,
tensor_Reduction.device_data(),
tensor_Tensor.device_data(),
static_cast<int>(tensor_Reduction.stride()[0]),
static_cast<int>(tensor_Tensor.stride()[0])
);
// find workspace requirement for parallel split-k reduction
size_t workspace_size = Conv2d::get_workspace_size(conv2d_args);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = conv2d_op.initialize(conv2d_args, workspace.get());
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return true;
}
// conv2d operation with parallel split-k-mode
if (split_k_mode == cutlass::conv::SplitKMode::kParallel) {
// conv2d output is written to workspace in global memory
conv2d_args.ref_D.reset(reinterpret_cast<ElementC*>(workspace.get()));
// accumulate mma for each cta in k-dimension (1.0 * A * B)
conv2d_args.output_op = {ElementCompute(1), ElementCompute(0)};
// update conv2d operator arguments
status = conv2d_op.update(conv2d_args, workspace.get());
}
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
// run conv2d operator
status = conv2d_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
bool passed = false;
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << " device reference error: "
<< cudaGetErrorString(result);
// Final reduction over the partial reduction tensor
using Functor = cutlass::plus<ElementAccumulator>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementAccumulator,
ElementAccumulator,
LayoutC,
Functor,
8,
ElementAccumulator
>;
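// Reduce over rank 2 (the per-threadblock-tile dimension) so that each output channel k
// ends up with a single accumulated value in tensor_Final_Reduction.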
TensorReduction reduction(tensor_Reduction.extent(), 2);
cutlass::DeviceAllocation<uint8_t> reduction_device_workspace(reduction.workspace_size());
status = reduction.reduce(
tensor_Final_Reduction.device_ref(),
tensor_Reduction.device_ref(),
reduction_device_workspace.get(),
ElementAccumulator());
EXPECT_EQ(status, cutlass::Status::kSuccess);
EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess);
//
// Reference check
//
tensor_D_computed.sync_host();
#if CUTLASS_CONV_TEST_UNIT_REFERENCE_DEVICE_ENABLED
cutlass::reference::device::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator
>(
kConvolutionalOperator,
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D_reference.device_ref(),
alpha,
beta);
// sync host (copy device data to host) for dumping error output in case of mismatches
tensor_D_reference.sync_host();
#else
cutlass::reference::host::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator
>(
kConvolutionalOperator,
problem_size,
tensor_A.host_ref(),
tensor_B.host_ref(),
tensor_C.host_ref(),
tensor_D_reference.host_ref(),
alpha,
beta);
#endif
passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view());
EXPECT_TRUE(passed);
//
// Reference check on reduction results
//
tensor_Reduction.sync_host();
tensor_Final_Reduction.sync_host();
// Compute the reference reduction by summing the reference output over N, P, Q for each output channel k
cutlass::HostTensor<ElementAccumulator, LayoutC> reference_Reduction;
reference_Reduction.resize({
1,
1,
1,
(problem_size.K)
});
for (int k = 0; k < problem_size.K; ++k) {
ElementAccumulator reduced_value = ElementAccumulator();
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
reduced_value += tensor_D_reference.at({n, p, q, k});
}
}
}
reference_Reduction.at({0, 0, 0, k}) = reduced_value;
}
passed = cutlass::reference::host::TensorEquals(
tensor_Final_Reduction.host_view(),
reference_Reduction.host_view()
);
EXPECT_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_Conv2d_ImplicitGemm_device_"
<< (split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial_reduction_" : "parallel_reduction_")
<< (Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kFprop ? "fprop_" :
(Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ? "dgrad_" : "wgrad_"))
<< "nhwc_"
<< problem_size.N << "x"
<< problem_size.H << "x"
<< problem_size.W << "x"
<< problem_size.C
<< "_krsc_"
<< problem_size.K << "x"
<< problem_size.R << "x"
<< problem_size.S << "x"
<< problem_size.C
<< "_padding_"
<< problem_size.pad_h << "x"
<< problem_size.pad_w
<< "_stride_"
<< problem_size.stride_h << "x"
<< problem_size.stride_w
<< "_dilation_"
<< problem_size.dilation_h << "x"
<< problem_size.dilation_w << "_"
<< (problem_size.mode == cutlass::conv::Mode::kCrossCorrelation ? "xcorr_" : "conv_")
<< Conv2d::ThreadblockShape::kM << "x"
<< Conv2d::ThreadblockShape::kN << "x"
<< Conv2d::ThreadblockShape::kK << "_"
<< Conv2d::WarpShape::kM << "x"
<< Conv2d::WarpShape::kN << "x"
<< Conv2d::WarpShape::kK << ".txt";
std::cout << fname.str() << std::endl;
std::ofstream results(fname.str());
results << problem_size << std::endl;
results
<< "\nA:\n" << tensor_A.host_view() << "\n"
<< "\nB:\n" << tensor_B.host_view() << "\n"
<< "\nC:\n" << tensor_C.host_view() << "\n"
<< "\nD reference:\n" << tensor_D_reference.host_view() << "\n"
<< "\nD computed:\n" << tensor_D_computed.host_view() << "\n"
<< "\nreduction reference:\n" << reference_Reduction.host_view() << "\n"
<< "\nreduction computed:\n" << tensor_Reduction.host_view() << "\n";
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////////////
// TestAllConv2dWithReduction: Runs the cutlass::conv::device::ImplicitGemmConvolution operator and compares it with reference
// TestAllConv2dWithReduction runs the conv operator on default conv problem sizes from test::conv::device::TestbedConv2dProblemSizes
// Additionally, each conv2d test can provide conv problem sizes (conv_test_sizes) and a blacklist of sizes
// (conv_blacklist_sizes)
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
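// Example usage from a unit test (illustrative only; the ImplicitGemm kernel type is a placeholder):
//   using Conv2dFpropKernel = cutlass::conv::device::ImplicitGemmConvolution<...>;
//   EXPECT_TRUE(test::conv::device::TestAllConv2dWithReduction<Conv2dFpropKernel>());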
template <typename ImplicitGemm>
bool TestAllConv2dWithReduction(
const Conv2dProblemVector & conv_test_sizes = Conv2dProblemVector(),
const Conv2dProblemVector & conv_blacklist_sizes = Conv2dProblemVector()) {
bool passed = true;
//
// Testbed object
//
TestbedConv2dWithReduction<ImplicitGemm> testbed;
//
// Get conv problem sizes to run conv operator
//
TestbedConv2dProblemSizes conv_problems(128/cutlass::sizeof_bits<typename ImplicitGemm::ElementA>::value);
// Vector of conv2d problem sizes to avoid duplicate runs
Conv2dProblemVector conv_tested_sizes;
Conv2dProblemVector const *problem_vectors[] = {
&conv_test_sizes, // run user specified sizes
&conv_problems.conv2d_default_sizes, // run default and cudnn bug sizes
&conv_problems.conv2d_resnet50_sizes, // run resnet50 sizes
#if CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED
&conv_problems.conv2d_rigorous_sizes, // run large and rigorous sizes if enabled
#endif
};
// Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slice=1, alpha=1.0, beta=0.0)
for (Conv2dProblemVector const * problem_vector : problem_vectors) {
// Run conv testbed on default convolution sizes
for(auto conv_problem : *problem_vector) {
// Skip blacklist and avoid duplicate problem sizes
if (std::find(conv_blacklist_sizes.begin(), conv_blacklist_sizes.end(), conv_problem) != conv_blacklist_sizes.end() ||
std::find(conv_tested_sizes.begin(), conv_tested_sizes.end(), conv_problem) != conv_tested_sizes.end()) {
continue;
}
//
// Procedurally disable certain cases
//
// CUTLASS DGRAD's *unity* stride specialization only supports stride {1, 1}
if ((ImplicitGemm::kConvolutionalOperator ==
cutlass::conv::Operator::kDgrad) &&
(ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
cutlass::conv::StrideSupport::kUnity)) {
if (!((conv_problem.stride_h == 1) && (conv_problem.stride_w == 1))) {
continue;
}
}
#if 0 // relax restrictions on analytic strided dgrad
// CUTLASS DGRAD's *strided* specialization only supports stride >= {2, 2}
if ((ImplicitGemm::kConvolutionalOperator ==
cutlass::conv::Operator::kDgrad) &&
(ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
cutlass::conv::StrideSupport::kStrided)) {
if (((conv_problem.stride_h == 1) && (conv_problem.stride_w == 1))) {
continue;
}
}
#endif
//
// Test
//
// push back tested problem size to avoid re-running duplicates
conv_tested_sizes.push_back(conv_problem);
// test mode = xcross
passed = testbed.run(
conv_problem,
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
// test mode = convolution
passed = testbed.run(
conv_problem.reset_mode(cutlass::conv::Mode::kConvolution),
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
}
}
// CUTLASS DGRAD's *strided* specialization does not support split-k mode
if ((ImplicitGemm::kConvolutionalOperator ==
cutlass::conv::Operator::kDgrad) &&
(ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
cutlass::conv::StrideSupport::kStrided)) {
passed = testbed.run(
cutlass::conv::Conv2dProblemSize(
{1, 56, 56, 8}, // input size (NHWC)
{8, 1, 1, 8}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1}), // dilation (dilation_h, dilation_w)
cutlass::conv::SplitKMode::kSerial,
cutlass::from_real<typename ImplicitGemm::ElementCompute>(2.0),
cutlass::from_real<typename ImplicitGemm::ElementCompute>(2.0));
if (!passed) {
return false;
}
return passed;
}
// Sweep split-k-slices using serial and parallel reduction with non-unity alpha and non-zero beta for
// a single conv2d problem size. Convolution unit tests take a long time to run, so only sweep parameters
// which are absolutely necessary to catch functional bugs. The code below provides the option to sweep
// alpha and beta for local testing, but only runs one value for each.
cutlass::conv::Conv2dProblemSize conv2d_split_k_test_size (
{1, 17, 11, 288}, // input size (NHWC)
{160, 3, 3, 288}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
);
// Parallel SplitK is not tested.
cutlass::conv::SplitKMode split_k_modes [] = {
cutlass::conv::SplitKMode::kSerial,
};
int split_k_slices[] = {
1, 2, 3, 4, 201
};
double problem_alpha[] = {
2.0
};
double problem_beta[] = {
2.0
};
for (auto split_k_mode : split_k_modes) {
for (auto split_k_slice : split_k_slices) {
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
passed = testbed.run(
conv2d_split_k_test_size.reset_split_k_slices(split_k_slice),
split_k_mode,
cutlass::from_real<typename ImplicitGemm::ElementCompute>(alpha),
cutlass::from_real<typename ImplicitGemm::ElementCompute>(beta));
if (!passed) {
return false;
}
}
}
}
}
return passed;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace conv
} // namespace test
// End of file: test/unit/conv/device/conv2d_with_reduction_testbed.h
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief unit tests for matrix_coord
*/
#include "../common/cutlass_unit_test.h"
#include "cutlass/matrix_coord.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace core {
void test_matrix_coord(cutlass::MatrixCoord::Index row, cutlass::MatrixCoord::Index column) {
cutlass::MatrixCoord matrix_coord(row, column);
EXPECT_EQ(matrix_coord.row(), row);
EXPECT_EQ(matrix_coord.column(), column);
}
void test_matrix_coord_operator_addition() {
cutlass::MatrixCoord::Index row_a = 13;
cutlass::MatrixCoord::Index column_a = 42;
cutlass::MatrixCoord::Index row_b = 20;
cutlass::MatrixCoord::Index column_b = 15;
cutlass::MatrixCoord matrix_coord_a(row_a, column_a);
cutlass::MatrixCoord matrix_coord_b(row_b, column_b);
auto matrix_coord_c = matrix_coord_a + matrix_coord_b;
EXPECT_EQ(matrix_coord_c.row(), row_a + row_b);
EXPECT_EQ(matrix_coord_c.column(), column_a + column_b);
}
void test_matrix_coord_operator_subtraction() {
cutlass::MatrixCoord::Index row_a = 13;
cutlass::MatrixCoord::Index column_a = 42;
cutlass::MatrixCoord::Index row_b = 20;
cutlass::MatrixCoord::Index column_b = 15;
cutlass::MatrixCoord matrix_coord_a(row_a, column_a);
cutlass::MatrixCoord matrix_coord_b(row_b, column_b);
auto matrix_coord_c = matrix_coord_a - matrix_coord_b;
EXPECT_EQ(matrix_coord_c.row(), row_a - row_b);
EXPECT_EQ(matrix_coord_c.column(), column_a - column_b);
}
void test_matrix_coord_operator_multiply() {
cutlass::MatrixCoord::Index row_a = 13;
cutlass::MatrixCoord::Index column_a = 42;
cutlass::MatrixCoord::Index row_b = 20;
cutlass::MatrixCoord::Index column_b = 15;
cutlass::MatrixCoord matrix_coord_a(row_a, column_a);
cutlass::MatrixCoord matrix_coord_b(row_b, column_b);
auto matrix_coord_c = matrix_coord_a * matrix_coord_b;
EXPECT_EQ(matrix_coord_c.row(), row_a * row_b);
EXPECT_EQ(matrix_coord_c.column(), column_a * column_b);
}
void test_matrix_coord_operator_division() {
cutlass::MatrixCoord::Index row_a = 13;
cutlass::MatrixCoord::Index column_a = 42;
cutlass::MatrixCoord::Index row_b = 20;
cutlass::MatrixCoord::Index column_b = 15;
cutlass::MatrixCoord matrix_coord_a(row_a, column_a);
cutlass::MatrixCoord matrix_coord_b(row_b, column_b);
auto matrix_coord_c = matrix_coord_a / matrix_coord_b;
EXPECT_EQ(matrix_coord_c.row(), row_a / row_b);
EXPECT_EQ(matrix_coord_c.column(), column_a / column_b);
}
void test_matrix_coord_operator_addition_assignment() {
cutlass::MatrixCoord::Index row_a = 13;
cutlass::MatrixCoord::Index column_a = 42;
cutlass::MatrixCoord::Index row_b = 20;
cutlass::MatrixCoord::Index column_b = 15;
cutlass::MatrixCoord matrix_coord_a(row_a, column_a);
cutlass::MatrixCoord matrix_coord_b(row_b, column_b);
matrix_coord_a += matrix_coord_b;
EXPECT_EQ(matrix_coord_a.row(), row_a + row_b);
EXPECT_EQ(matrix_coord_a.column(), column_a + column_b);
}
void test_matrix_coord_operator_subtraction_assignment() {
cutlass::MatrixCoord::Index row_a = 13;
cutlass::MatrixCoord::Index column_a = 42;
cutlass::MatrixCoord::Index row_b = 20;
cutlass::MatrixCoord::Index column_b = 15;
cutlass::MatrixCoord matrix_coord_a(row_a, column_a);
cutlass::MatrixCoord matrix_coord_b(row_b, column_b);
matrix_coord_a -= matrix_coord_b;
EXPECT_EQ(matrix_coord_a.row(), row_a - row_b);
EXPECT_EQ(matrix_coord_a.column(), column_a - column_b);
}
void test_matrix_coord_operator_multiply_assignment() {
cutlass::MatrixCoord::Index row_a = 13;
cutlass::MatrixCoord::Index column_a = 42;
cutlass::MatrixCoord::Index row_b = 20;
cutlass::MatrixCoord::Index column_b = 15;
cutlass::MatrixCoord matrix_coord_a(row_a, column_a);
cutlass::MatrixCoord matrix_coord_b(row_b, column_b);
matrix_coord_a *= matrix_coord_b;
EXPECT_EQ(matrix_coord_a.row(), row_a * row_b);
EXPECT_EQ(matrix_coord_a.column(), column_a * column_b);
}
void test_matrix_coord_operator_division_assignment() {
cutlass::MatrixCoord::Index row_a = 13;
cutlass::MatrixCoord::Index column_a = 42;
cutlass::MatrixCoord::Index row_b = 20;
cutlass::MatrixCoord::Index column_b = 15;
cutlass::MatrixCoord matrix_coord_a(row_a, column_a);
cutlass::MatrixCoord matrix_coord_b(row_b, column_b);
matrix_coord_a /= matrix_coord_b;
EXPECT_EQ(matrix_coord_a.row(), row_a / row_b);
EXPECT_EQ(matrix_coord_a.column(), column_a / column_b);
}
} // namespace core
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_row12_column24) {
cutlass::MatrixCoord::Index row = 12;
cutlass::MatrixCoord::Index column = 24;
test::core::test_matrix_coord(row, column);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_operator_addition) {
test::core::test_matrix_coord_operator_addition();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_operator_subtraction) {
test::core::test_matrix_coord_operator_subtraction();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_operator_multiply) {
test::core::test_matrix_coord_operator_multiply();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_operator_division) {
test::core::test_matrix_coord_operator_division();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_operator_addition_assignment) {
test::core::test_matrix_coord_operator_addition_assignment();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_operator_subtraction_assignment) {
test::core::test_matrix_coord_operator_subtraction_assignment();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_operator_multiply_assignment) {
test::core::test_matrix_coord_operator_multiply_assignment();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Matrix_Coord, basic_operator_division_assignment) {
test::core::test_matrix_coord_operator_division_assignment();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// End of file: test/unit/core/matrix_coord.cu
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <iomanip>
#include <utility>
#include <type_traits>
#include <vector>
#include <numeric>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include "tiled_cp_async_testbed.hpp"
using namespace cute;
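// Each case below builds a TiledCopy from an SM80 cp.async Copy_Atom plus explicit thread and
// value layouts, then exercises a gmem->smem copy through the shared testbed in
// tiled_cp_async_testbed.hpp (with or without a swizzled shared-memory layout).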
TEST(SM80_CuTe_tiled_cp_async, no_swizzle_mn_single_tile)
{
{
using copy_atom = decltype(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{});
using thr_layout = decltype(Layout<Shape <_16, _8>, Stride< _1,_16>>{});
using val_layout = decltype(Layout<Shape<_2,_1>>{});
using tiled_copy = decltype(make_tiled_copy(copy_atom{}, thr_layout{}, val_layout{}));
using smem_layout_atom = decltype(Layout<Shape <_16, _4>, Stride< _1,_16>>{});
using gmem_stride_type = decltype(LayoutLeft{});
test_cp_async_no_swizzle<double, cute::Int<64>, cute::Int<16>, gmem_stride_type, smem_layout_atom, tiled_copy>();
}
{
using copy_atom = decltype(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{});
using thr_layout = decltype(Layout<Shape <_16, _8>, Stride< _1,_16>>{});
using val_layout = decltype(Layout<Shape<_2,_1>>{});
using tiled_copy = decltype(make_tiled_copy(copy_atom{}, thr_layout{}, val_layout{}));
using smem_layout_atom = decltype(Layout<Shape <_16, _4>, Stride< _1,_16>>{});
using gmem_stride_type = decltype(LayoutLeft{});
test_cp_async_no_swizzle<double, cute::Int<128>, cute::Int<16>, gmem_stride_type, smem_layout_atom, tiled_copy>();
}
}
TEST(SM80_CuTe_tiled_cp_async, no_swizzle_k_single_tile)
{
{
using copy_atom = decltype(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{});
using thr_layout = decltype(Layout<Shape <_16, _8>, Stride< _8,_1>>{});
using val_layout = decltype(Layout<Shape<_1,_2>>{});
using tiled_copy = decltype(make_tiled_copy(copy_atom{}, thr_layout{}, val_layout{}));
using smem_layout_atom = decltype(make_ordered_layout(Shape<_128,_16>{}, Step <_2, _1>{}));
using gmem_stride_type = decltype(LayoutRight{});
test_cp_async_no_swizzle<double, cute::Int<128>, cute::Int<16>, gmem_stride_type, smem_layout_atom, tiled_copy>();
}
}
TEST(SM80_CuTe_tiled_cp_async, swizzle_mn_single_tile)
{
{
using copy_atom = decltype(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{});
using thr_layout = decltype(Layout<Shape <_16, _8>, Stride< _1,_16>>{});
using val_layout = decltype(Layout<Shape<_2,_1>>{});
using tiled_copy = decltype(make_tiled_copy(copy_atom{}, thr_layout{}, val_layout{}));
using swizzle_atom = decltype(Swizzle<2,2,2>{});
using smem_layout_atom = decltype(Layout<Shape <_16, _4>, Stride< _1,_16>>{});
using gmem_stride_type = decltype(LayoutLeft{});
test_cp_async_with_swizzle<double, cute::Int<64>, cute::Int<16>, gmem_stride_type, swizzle_atom, smem_layout_atom, tiled_copy>();
}
{
using copy_atom = decltype(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{});
using thr_layout = decltype(Layout<Shape <_16, _8>, Stride< _1,_16>>{});
using val_layout = decltype(Layout<Shape<_2,_1>>{});
using tiled_copy = decltype(make_tiled_copy(copy_atom{}, thr_layout{}, val_layout{}));
using swizzle_atom = decltype(Swizzle<2,2,2>{});
using smem_layout_atom = decltype(Layout<Shape <_16, _4>, Stride< _1,_16>>{});
using gmem_stride_type = decltype(LayoutLeft{});
test_cp_async_with_swizzle<double, cute::Int<128>, cute::Int<16>, gmem_stride_type, swizzle_atom, smem_layout_atom, tiled_copy>();
}
}
TEST(SM80_CuTe_tiled_cp_async, swizzle_k_single_tile)
{
{
using copy_atom = decltype(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<double>, double>{});
using thr_layout = decltype(Layout<Shape < _8,_16>, Stride<_16, _1>>{});
using val_layout = decltype(Layout<Shape<_1,_1>>{});
using tiled_copy = decltype(make_tiled_copy(copy_atom{}, thr_layout{}, val_layout{}));
using swizzle_atom = decltype(Swizzle<2,0,4>{});
using smem_layout_atom = decltype(Layout<Shape <_4,_16>, Stride<_1, _4>>{});
using gmem_stride_type = decltype(LayoutRight{});
test_cp_async_with_swizzle<double, cute::Int<128>, cute::Int<16>, gmem_stride_type, swizzle_atom, smem_layout_atom, tiled_copy>();
}
{
using copy_atom = decltype(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, tfloat32_t>{});
using thr_layout = decltype(Layout<Shape <_16,_8>, Stride< _8,_1>>{});
using val_layout = decltype(Layout<Shape < _1,_4>>{});
using tiled_copy = decltype(make_tiled_copy(copy_atom{}, thr_layout{}, val_layout{}));
using swizzle_atom = decltype(Swizzle<3,2,3>{});
using smem_layout_atom = decltype(Layout<Shape < _8,_32>, Stride<_32, _1>>{});
using gmem_stride_type = decltype(LayoutRight{});
test_cp_async_with_swizzle<tfloat32_t, cute::Int<128>, cute::Int<32>, gmem_stride_type, swizzle_atom, smem_layout_atom, tiled_copy>();
}
}
// End of file: test/unit/cute/ampere/tiled_cp_async.cu
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include <cute/arch/copy_sm90.hpp>
using namespace cute;
template<class T>
__global__ void
stsm_test_device(uint16_t* g_in, uint16_t* g_out)
{
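// Number of 32-bit registers each thread supplies as the STSM source (1, 2, or 4 for STSM x1/x2/x4).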
constexpr int count = sizeof(T) / 4;
int tid = threadIdx.x;
int stride = blockDim.x;
// load input gmem -> rmem
uint32_t reg[count];
for (int i = 0; i < (sizeof(T) / 4); i++) {
reg[i] = reinterpret_cast<uint32_t*>(g_in)[tid + (stride * i)];
}
__shared__ uint32_t smem[32 * count];
// load rmem -> smem using STSM
uint128_t* smem_ptr = reinterpret_cast<uint128_t*>(smem) + tid;
T* rmem_ptr = reinterpret_cast<T*>(reg);
cute::copy_stsm(rmem_ptr, smem_ptr);
__syncthreads();
// store output smem -> gmem
for (int i = 0; i < (sizeof(T) / 4); i++) {
reinterpret_cast<uint32_t*>(g_out)[tid + (stride * i)] = smem[tid + (stride * i)];
}
}
template <class TiledCopy, class SmemLayout>
__global__ void
stsm_test_device_cute(uint16_t* g_in, uint16_t* g_out,
TiledCopy tiled_copy, SmemLayout smem_layout)
{
using namespace cute;
__shared__ uint16_t smem[size(smem_layout)];
Tensor t_g_in = make_tensor(make_gmem_ptr(g_in), smem_layout);
Tensor t_g_out = make_tensor(make_gmem_ptr(g_out), smem_layout);
Tensor t_smem = make_tensor(make_smem_ptr(smem), smem_layout);
int tid = threadIdx.x;
auto thr_copy = tiled_copy.get_thread_slice(tid);
Tensor tXgX = thr_copy.partition_S(t_g_in); // (V,M,N)
Tensor tXsX = thr_copy.partition_D(t_smem); // (V,M,N)
Tensor tXrX = make_tensor<uint16_t>(shape(tXgX)); // (V,M,N)
clear(tXrX); // Just to make sure
/*
if (thread0()) {
print("tXsX: " ); print(tXsX.layout()); print("\n");
print("tXgX: " ); print(tXgX.layout()); print("\n");
print("tXrX: " ); print(tXrX.layout()); print("\n");
}
*/
// Load input gmem -> rmem
copy(tXgX, tXrX);
// Copy rmem -> smem via tiled_copy (STSM, STS)
copy(tiled_copy, tXrX, tXsX);
// Output smem -> gmem
for (int i = tid; i < size(t_smem); i += size(tiled_copy)) {
t_g_out(i) = t_smem(i);
}
}
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
TEST(SM90_CuTe_Hopper, Stsm)
{
constexpr int count = 1024;
thrust::host_vector<uint16_t> h_in(count);
for (int i = 0; i < count; ++i) {
h_in[i] = uint16_t(i);
}
thrust::device_vector<uint16_t> d_in = h_in;
//
// STSM 1x (32b)
//
{
thrust::device_vector<uint16_t> d_out(count);
stsm_test_device<uint32_t><<<1, 32>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()));
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < 32; ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("STSM 1x stsm_test_device SUCCESS\n");
}
//
// STSM 2x (64b)
//
{
thrust::device_vector<uint16_t> d_out(count);
stsm_test_device<uint64_t><<<1, 32>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()));
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < 64; ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("STSM 2x stsm_test_device SUCCESS\n");
}
//
// STSM 4x (128b)
//
{
thrust::device_vector<uint16_t> d_out(count);
stsm_test_device<uint128_t><<<1, 32>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()));
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < 128; ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("STSM 4x stsm_test_device SUCCESS\n");
}
//
// CuTe STSM
//
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U32x1_STSM_N, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved U32x1_STSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U32x2_STSM_N, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved U32x2_STSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U32x4_STSM_N, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved U32x4_STSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<UniversalCopy<uint16_t>, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved STSM.U16 SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U32x1_STSM_N, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U32x1_STSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U32x2_STSM_N, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U32x2_STSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U32x4_STSM_N, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U32x4_STSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<UniversalCopy<uint16_t>, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 STSM.U16 SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride<_32, _1>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U16x2_STSM_T, uint16_t>{},
Layout<Shape<_4,_8>>{},
Layout<Shape<_2,_1>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U16x2_STSM_T SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride<_32, _1>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U16x4_STSM_T, uint16_t>{},
Layout<Shape<_4,_8>>{},
Layout<Shape<_4,_1>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U16x4_STSM_T SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride<_32, _1>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM90_U16x8_STSM_T, uint16_t>{},
Layout<Shape<_4,_8>>{},
Layout<Shape<_8,_1>>{});
stsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U16x8_STSM_T SUCCESS\n");
}
CUTLASS_TRACE_HOST("PASS");
}
#endif
// End of file: test/unit/cute/hopper/stsm.cu
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for warp-level epilogue fragment iterators targeting Volta Tensor Cores
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/gemm/warp/mma_tensor_op_sm70.h"
#include "cutlass/epilogue/warp/fragment_iterator_volta_tensor_op.h"
#include "cutlass/core_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM70_Epilogue_warp_FragmentIterator, mma_f16_64x64x4) {
using Shape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
Shape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> accumulator_tensor({Shape::kM, Shape::kN});
cutlass::reference::host::TensorFill(accumulator_tensor.host_view(), ElementC(-1));
for (int tid = 0; tid < 1; ++tid) {
typename MmaTensorOp::IteratorC::Fragment accumulator_tile;
CUTLASS_PRAGMA_UNROLL
for (size_t i = 0; i < accumulator_tile.size(); ++i) {
accumulator_tile[i] = static_cast<ElementC>(int(i));
}
using FragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp<
cutlass::gemm::GemmShape<64, 64, 4>,
cutlass::gemm::GemmShape<32, 32, 4>,
cutlass::half_t,
cutlass::layout::RowMajor
>;
FragmentIterator frag_iterator(accumulator_tile);
typename FragmentIterator::Fragment frag;
for (int iter = 0; iter < FragmentIterator::kIterations; ++iter) {
frag_iterator.load(frag);
++frag_iterator;
#if 0
std::cout << "T" << tid << ": ";
for (size_t i = 0; i < frag.size(); ++i) {
std::cout << " " << frag[i];
}
std::cout << std::endl;
#endif
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM70_Epilogue_warp_FragmentIterator, mma_f32_64x64x4) {
using Shape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using LayoutC = cutlass::layout::RowMajor;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
Shape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
cutlass::HostTensor<ElementC, LayoutC> accumulator_tensor({Shape::kM, Shape::kN});
cutlass::reference::host::TensorFill(accumulator_tensor.host_view(), ElementC(-1));
for (int tid = 0; tid < 1; ++tid) {
typename MmaTensorOp::IteratorC::Fragment accumulator_tile;
CUTLASS_PRAGMA_UNROLL
for (size_t i = 0; i < accumulator_tile.size(); ++i) {
accumulator_tile[i] = static_cast<ElementC>(i);
}
typename MmaTensorOp::IteratorC iterator_C(accumulator_tensor.host_ref(), tid);
iterator_C.store(accumulator_tile);
}
/*
std::ofstream output("volta_mma_f32_64x64x4.csv");
output << accumulator_tensor.host_view() << std::endl;
*/
for (int tid = 0; tid < 1; ++tid) {
typename MmaTensorOp::IteratorC::Fragment accumulator_tile;
using FragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp<
cutlass::gemm::GemmShape<64, 64, 4>,
cutlass::gemm::GemmShape<32, 32, 4>,
ElementC,
LayoutC
>;
FragmentIterator frag_iterator(accumulator_tile);
for (int iter = 0; iter < FragmentIterator::kIterations; ++iter) {
typename FragmentIterator::Fragment frag;
frag_iterator.load(frag);
++frag_iterator;
#if 0
std::cout << "Iteration: " << iter << " - T" << tid << ": ";
for (int i = 0; i < frag.size(); ++i) {
std::cout << " " << frag[i];
}
std::cout << std::endl;
#endif
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/epilogue/warp/fragment_iterator_volta_tensor_op.cu/0 | {
"file_path": "test/unit/epilogue/warp/fragment_iterator_volta_tensor_op.cu",
"repo_id": "test",
"token_count": 2712
} | 50 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Testbed and host reference for EVT unittest
*/
#pragma once
#include "gemm_testbed_3x.hpp"
namespace test {
namespace gemm {
namespace device {
/// Host-side tapply over std::tuple (cute's own tapply is declared CUTE_HOST_DEVICE)
template <class T, class F, class G, int... I>
constexpr auto
tapply(T&& t, F&& f, G&& g, cute::seq<I...>)
{
return g(f(std::get<I>(static_cast<T&&>(t)))...);
}
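// Editorial usage sketch (not part of the original header): tapply maps `f` over the
// tuple elements selected by the index sequence and folds the transformed values
// through `g`. For example:
//
//   auto sum_of_doubles = tapply(std::make_tuple(1, 2, 3),
//                                [](int x) { return 2 * x; },
//                                [](auto... xs) { return (xs + ...); },
//                                cute::make_seq<3>{});   // yields 12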
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT: Base class for EVT Node
template <
typename Gemm_
>
class HostEVTNodeBase {
public:
using Gemm = Gemm_;
using TestBedImpl = typename detail::TestbedImpl<Gemm, cutlass::epilogue::thread::Identity, true>;
using Kernel = typename Gemm::GemmKernel;
using Epilogue = typename Kernel::CollectiveEpilogue;
using ElementCompute = typename TestBedImpl::ElementCompute;
using ElementScalar = typename TestBedImpl::ElementScalar;
using ElementAccumulator = typename Kernel::ElementAccumulator;
using ElementC = typename Kernel::ElementC;
using ElementD = typename Kernel::ElementD;
using LayoutTagC = typename TestBedImpl::LayoutTagC;
using LayoutTagD = typename TestBedImpl::LayoutTagD;
private:
  bool _check_relative_equality = false;
// Factors used for calculating relative equality. These default
// values are borrowed from those used by default in the CUTLASS
// profiler for performing relative equality checks.
float _epsilon = 0.05f;
float _nonzero_floor = 1.0f / 256.0f;
public:
HostEVTNodeBase(){}
HostEVTNodeBase(bool check_relative_equality):
_check_relative_equality(check_relative_equality) { }
template <
class Element,
class Layout
>
bool equality_check(
cutlass::TensorView<Element, Layout> const& lhs,
cutlass::TensorView<Element, Layout> const& rhs) const {
if (_check_relative_equality) {
return cutlass::reference::host::TensorRelativelyEquals(
lhs, rhs, Element(_epsilon), Element(_nonzero_floor)
);
}
else {
return cutlass::reference::host::TensorEquals(lhs, rhs);
}
}
void* get_tensor_C_ptr() {
return nullptr;
}
void* get_tensor_D_ptr() {
return nullptr;
}
bool compare_reference(std::stringstream& error_ss) {
return true;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Accumulator
template <
typename Gemm
>
class HostAccumulator: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using TestBedImpl = typename Base::TestBedImpl;
using ElementAccumulator = typename Base::ElementAccumulator;
using ElementCompute = typename Base::ElementCompute;
struct Arguments { };
private:
cutlass::NumericConverter<ElementCompute, ElementAccumulator> accumulator_converter;
public:
HostAccumulator(){}
template<typename ProblemShapeType>
HostAccumulator(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
:Base(check_relative_equality) {}
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
return accumulator_converter(acc);
}
Arguments get_arguments() {
return Arguments{};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Scalar Broadcast
template <
typename Gemm,
int Value,
int BroadcastCount = 1,
template <class> class ReductionFn = cutlass::multiplies
>
class HostScalarBroadcast : public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using ElementCompute = typename Base::ElementCompute;
struct Arguments {
ElementCompute scalar[BroadcastCount] = {0};
ElementCompute const* scalar_ptrs[BroadcastCount] = { nullptr };
cute::Stride<cute::_0,cute::_0,cute::_0> dScalar{};
};
private:
ElementCompute _scalar{};
public:
HostScalarBroadcast(){}
template<typename ProblemShapeType, typename TestBedImpl>
HostScalarBroadcast(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
: Base(check_relative_equality), _scalar(ElementCompute(Value)) {}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
return _scalar;
}
bool compare_reference(std::stringstream& error_ss) {
error_ss << "Scalar: " << float(_scalar) << "\n\n";
return true;
}
Arguments get_arguments() {
if constexpr (BroadcastCount == 1)
return Arguments{{_scalar}, {nullptr}};
else if constexpr (BroadcastCount == 2)
return Arguments{{_scalar, _scalar}, {nullptr, nullptr}};
else if constexpr (BroadcastCount == 3)
return Arguments{{_scalar, _scalar, _scalar}, {nullptr, nullptr, nullptr}};
else
return Arguments{{_scalar}, {nullptr}};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Row Broadcast
template <
typename Gemm,
typename ElementBias_=void
>
class HostRowBroadcast: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using ElementBias = std::conditional_t<std::is_void_v<ElementBias_>,
typename Base::ElementC,
ElementBias_>;
using TestBedImpl = typename Base::TestBedImpl;
using ElementCompute = typename Base::ElementCompute;
using LayoutTagVector = cutlass::layout::PackedVectorLayout;
struct Arguments {
ElementBias const* ptr_row = nullptr;
ElementBias null_default = ElementBias(0);
cute::Stride<cute::_0,cute::_1,cute::_0> dRow = {};
};
private:
cutlass::NumericConverter<ElementCompute, ElementBias> _bias_converter;
cutlass::HostTensor<ElementBias, LayoutTagVector> _bias;
int _N;
TestBedImpl impl_;
public:
HostRowBroadcast(){}
template<typename ProblemShapeType>
HostRowBroadcast(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
: Base(check_relative_equality), impl_(impl) {
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
_N = cute::get<1>(problem_shape_MNKL);
_bias.resize(cutlass::Coord<1>(_N));
EXPECT_TRUE(
detail::initialize_tensor(
_bias.host_view(), cutlass::Distribution::Uniform,
impl_.collective_mma_inputs.seed + 2023
)
);
_bias.sync_device();
}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
auto TensorBias = cute::make_tensor(_bias.host_data(),
cute::make_layout(cute::make_shape(cute::_1{}, _N)));
return _bias_converter(TensorBias(1, n + n_b));
}
bool compare_reference(std::stringstream& error_ss) {
error_ss
<< "PerColumnBias = \n" << _bias.host_view() << "\n\n";
return true;
}
Arguments get_arguments() {
return {_bias.device_data()};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Column Broadcast
template <
typename Gemm,
typename ElementBias_=void
>
class HostColBroadcast: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using ElementBias = std::conditional_t<std::is_void_v<ElementBias_>,
typename Base::ElementC,
ElementBias_>;
using TestBedImpl = typename Base::TestBedImpl;
using ElementCompute = typename Base::ElementCompute;
using LayoutTagVector = cutlass::layout::PackedVectorLayout;
struct Arguments {
ElementBias const* ptr_row = nullptr;
ElementBias null_default = ElementBias(0);
cute::Stride<cute::_1,cute::_0,cute::_0> dRow = {};
};
private:
cutlass::NumericConverter<ElementCompute, ElementBias> _bias_converter;
cutlass::HostTensor<ElementBias, LayoutTagVector> _bias;
int _M;
TestBedImpl impl_;
public:
HostColBroadcast(){}
template<typename ProblemShapeType>
HostColBroadcast(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
: Base(check_relative_equality), impl_(impl) {
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
_M = cute::get<0>(problem_shape_MNKL);
_bias.resize(cutlass::Coord<1>(_M));
EXPECT_TRUE(
detail::initialize_tensor(
_bias.host_view(), cutlass::Distribution::Uniform,
impl_.collective_mma_inputs.seed + 2023
)
);
_bias.sync_device();
}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
auto TensorBias = cute::make_tensor(_bias.host_data(),
cute::make_layout(cute::make_shape(_M, cute::_1{})));
return _bias_converter(TensorBias(m + m_b, 1));
}
bool compare_reference(std::stringstream& error_ss) {
error_ss
<< "PerRowBias = \n" << _bias.host_view() << "\n\n";
return true;
}
Arguments get_arguments() {
return {_bias.device_data()};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Aux Load
template <
typename Gemm,
bool isC=false,
typename ElementAuxLoad_=void,
typename LayoutTagAux_=void
>
class HostAuxLoad: public HostEVTNodeBase<Gemm> {
public:
using ElementAuxLoad = std::conditional_t<std::is_void_v<ElementAuxLoad_>,
typename HostEVTNodeBase<Gemm>::ElementC,
ElementAuxLoad_>;
using LayoutTagAux = std::conditional_t<std::is_void_v<LayoutTagAux_>,
typename HostEVTNodeBase<Gemm>::LayoutTagC,
LayoutTagAux_>;
using Base = HostEVTNodeBase<Gemm>;
using TestBedImpl = typename Base::TestBedImpl;
using ElementCompute = typename Base::ElementCompute;
using StrideAux = cutlass::gemm::TagToStrideC_t<LayoutTagAux>;
struct Arguments_Aux {
ElementAuxLoad const *ptr_aux = nullptr;
ElementAuxLoad null_default = ElementAuxLoad(0);
StrideAux dAux = {};
};
struct Arguments_C {};
using Arguments = cute::conditional_t<isC, Arguments_C, Arguments_Aux>;
private:
cutlass::NumericConverter<ElementCompute, ElementAuxLoad> _aux_load_converter;
cutlass::HostTensor<ElementAuxLoad, LayoutTagAux> _tensor_aux_load;
int _M, _N, _L;
TestBedImpl impl_;
StrideAux _stride_aux;
public:
HostAuxLoad(){}
template<typename ProblemShapeType>
HostAuxLoad(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
: Base(check_relative_equality), impl_(impl) {
    auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
    // Set the members directly; a structured binding named _M/_N/_L would only shadow them.
    _M = cute::get<0>(problem_shape_MNKL);
    _N = cute::get<1>(problem_shape_MNKL); _L = cute::get<3>(problem_shape_MNKL);
auto aux_coord = cutlass::make_Coord(_M * _L, _N);
_tensor_aux_load.resize(
aux_coord,
cutlass::layout::Affine2Layout_Factory<LayoutTagAux>::layout_factory(
aux_coord, typename LayoutTagAux::Stride()
)
);
EXPECT_TRUE(
detail::initialize_tensor(
_tensor_aux_load.host_view(),
cutlass::Distribution::Uniform,
impl_.collective_mma_inputs.seed + 2023
)
);
_tensor_aux_load.sync_device();
_stride_aux = cutlass::make_cute_packed_stride(StrideAux{}, cute::make_shape(_M, _N, _L));
}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
auto TensorAuxLoad = cute::make_tensor(_tensor_aux_load.host_data(),
cute::make_layout(cute::make_shape(_M, _N, _L), _stride_aux));
return _aux_load_converter(TensorAuxLoad(m + m_b, n + n_b, l));
}
bool compare_reference(std::stringstream& error_ss) {
if constexpr (!isC) {
error_ss
<< "AuxLoad = \n" << _tensor_aux_load.host_view()<< "\n\n";
}
return true;
}
void* get_tensor_C_ptr() {
if constexpr (isC) {
return static_cast<void*>(_tensor_aux_load.device_data());
} else {
return nullptr;
}
}
Arguments get_arguments() {
if constexpr (isC)
return {};
else
return {_tensor_aux_load.device_data(), ElementAuxLoad(0), _stride_aux};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Compute
template<typename T>
T* findNonNullPtr(T* first_ptr) {
return first_ptr;
}
template <typename T, typename... Args>
T* findNonNullPtr(T* first_ptr, Args... args) {
if (first_ptr) {
return first_ptr;
}
return findNonNullPtr(args...);
}
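// Editorial note (not part of the original header): findNonNullPtr returns the first
// non-null pointer among its arguments, e.g.
//   findNonNullPtr(static_cast<int*>(nullptr), &x, &y) == &x
// It is used by HostVisitorBase below to pick out the single aux-load (tensor C) or
// aux-store (tensor D) pointer from a visitor's op list.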
template <
typename Gemm,
template <class> class ComputeOp_
>
class HostCompute: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using ElementCompute = typename Base::ElementCompute;
using ComputeOp = ComputeOp_<ElementCompute>;
struct Arguments {
struct OpArgs {} op;
};
private:
ComputeOp _op;
public:
HostCompute(){}
template <typename ProblemShapeType, typename TestBedImpl>
HostCompute(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false):
Base(check_relative_equality) { }
template <class ElementAccumulator, typename... Args>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc, Args... frg_inputs) {
return _op(frg_inputs...);
}
Arguments get_arguments(){
return {};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Unary Compute
template <
typename Gemm,
template <class> class ComputeOp_,
typename Child0
>
class HostUnaryCompute: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using ElementCompute = typename Base::ElementCompute;
using ComputeOp = ComputeOp_<ElementCompute>;
struct Arguments {
typename Child0::Arguments child_0_args;
struct OpArgs {} op;
};
private:
ComputeOp _op;
Child0 _child_0;
public:
HostUnaryCompute(){}
template <typename ProblemShapeType, typename TestBedImpl>
HostUnaryCompute(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false):
Base(check_relative_equality),
_child_0(problem_size, impl, check_relative_equality) { }
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
ElementCompute child_0_result = _child_0.visit(m, n, l, m_b, n_b, acc);
return _op(child_0_result);
}
Arguments get_arguments(){
return {
_child_0.get_arguments(),
{},
};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Aux Store
template <
typename Gemm,
bool isD=false,
class ElementAuxStore_=void,
typename LayoutTagAux_=void
>
class HostAuxStore: public HostEVTNodeBase<Gemm> {
public:
using ElementAuxStore = std::conditional_t<std::is_void_v<ElementAuxStore_>,
typename HostEVTNodeBase<Gemm>::ElementD,
ElementAuxStore_>;
using LayoutTagAux = std::conditional_t<std::is_void_v<LayoutTagAux_>,
typename HostEVTNodeBase<Gemm>::LayoutTagD,
LayoutTagAux_>;
using Base = HostEVTNodeBase<Gemm>;
using TestBedImpl = typename Base::TestBedImpl;
using ElementCompute = typename Base::ElementCompute;
using StrideAux = cutlass::gemm::TagToStrideC_t<LayoutTagAux>;
struct Arguments_Aux {
struct OpArgs {
ElementAuxStore* ptr_aux = nullptr;
StrideAux dAux = {};
} op;
};
struct Arguments_D {};
using Arguments = cute::conditional_t<isD, Arguments_D, Arguments_Aux>;
private:
cutlass::NumericConverter<ElementAuxStore, ElementCompute> destination_converter;
cutlass::HostTensor<ElementAuxStore, LayoutTagAux> _tensor_aux_store;
cutlass::HostTensor<ElementAuxStore, LayoutTagAux> _reference_aux_store;
int _M, _N, _L;
TestBedImpl impl_;
StrideAux _stride_aux;
public:
HostAuxStore(){}
template <typename ProblemShapeType>
HostAuxStore(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false):
Base(check_relative_equality),
impl_(impl) {
    auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
    // Set the members directly; a structured binding named _M/_N/_L would only shadow them.
    _M = cute::get<0>(problem_shape_MNKL);
    _N = cute::get<1>(problem_shape_MNKL); _L = cute::get<3>(problem_shape_MNKL);
auto aux_coord = cutlass::make_Coord(_M * _L, _N);
_tensor_aux_store.resize(
aux_coord,
cutlass::layout::Affine2Layout_Factory<LayoutTagAux>::layout_factory(
aux_coord, typename LayoutTagAux::Stride()
)
);
_reference_aux_store.resize(
aux_coord,
cutlass::layout::Affine2Layout_Factory<LayoutTagAux>::layout_factory(
aux_coord, typename LayoutTagAux::Stride()
)
);
_tensor_aux_store.sync_device();
_stride_aux = cutlass::make_cute_packed_stride(StrideAux{}, cute::make_shape(_M, _N, _L));
}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc, ElementCompute child_0_result) {
auto TensorAuxStore = cute::make_tensor(static_cast<ElementAuxStore*>(_reference_aux_store.host_data()),
cute::make_layout(cute::make_shape(_M, _N, _L), _stride_aux));
TensorAuxStore(m + m_b, n + n_b, l) = destination_converter(child_0_result);
return child_0_result;
}
bool compare_reference(std::stringstream& error_ss) {
// Verify the store node
_tensor_aux_store.sync_host();
bool equal = this->equality_check(_reference_aux_store.host_view(), _tensor_aux_store.host_view());
if (!equal) {
error_ss
<< "\n\nReference =\n" << _reference_aux_store.host_view()
<< "\n\nComputed =\n" << _tensor_aux_store.host_view() << "\n\n";
}
return equal;
}
void* get_tensor_D_ptr() {
if constexpr (isD)
return static_cast<void*>(_tensor_aux_store.device_data());
else
return nullptr;
}
Arguments get_arguments() {
if constexpr (isD) {
return {};
} else {
return {_tensor_aux_store.device_data(), _stride_aux};
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Row Reduce
template <
typename Gemm,
template <class> class ReduceFn,
typename ElementReduce
>
class HostRowReduce: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using TestBedImpl = typename Base::TestBedImpl;
using ElementCompute = typename Base::ElementCompute;
using ElementOutput = typename Base::ElementD;
using LayoutTagVector = cutlass::layout::PackedVectorLayout;
struct Arguments {
struct OpArgs {
ElementReduce* ptr_row = nullptr;
ElementCompute reduce_identity = 0;
cute::Stride<cute::_0, cute::_1, cute::_0> dRow = {};
} op;
};
private:
cutlass::NumericConverter<ElementReduce, ElementCompute> destination_converter;
cutlass::HostTensor<ElementReduce, LayoutTagVector> _tensor_row_reduce;
cutlass::HostTensor<ElementCompute, LayoutTagVector> _reduce_buffer;
cutlass::HostTensor<ElementReduce, LayoutTagVector> _reference_row_reduce;
int _N;
TestBedImpl impl_;
ReduceFn<ElementCompute> reduce_fn;
public:
HostRowReduce(){}
template <typename ProblemShapeType>
HostRowReduce(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false):
Base(check_relative_equality),
impl_(impl) {
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
_N = cute::get<1>(problem_shape_MNKL);
_tensor_row_reduce.resize(cutlass::Coord<1>(_N));
_reference_row_reduce.resize(cutlass::Coord<1>(_N));
_reduce_buffer.resize(cutlass::Coord<1>(_N));
_tensor_row_reduce.sync_device();
}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc, ElementCompute child_0_result) {
auto TensorRowReduce = cute::make_tensor(_reduce_buffer.host_data(),
cute::make_layout(cute::make_shape(cute::_1{}, _N)));
TensorRowReduce(1, n + n_b) = reduce_fn(TensorRowReduce(1, n + n_b), child_0_result);
return child_0_result;
}
bool compare_reference(std::stringstream& error_ss) {
// Verify the store node
_tensor_row_reduce.sync_host();
auto TensorRowReduce = cute::make_tensor(_reference_row_reduce.host_data(),
cute::make_layout(cute::make_shape(cute::_1{}, _N)));
auto TensorReduceBuffer = cute::make_tensor(_reduce_buffer.host_data(),
cute::make_layout(cute::make_shape(cute::_1{}, _N)));
// Filling the reference tensor with the reduce buffer
for (int n = 0; n < _N; n ++) {
TensorRowReduce(1, n) = destination_converter(TensorReduceBuffer(1, n));
}
bool equal = this->equality_check(_reference_row_reduce.host_view(), _tensor_row_reduce.host_view());
if (!equal) {
error_ss
<< "\n\nRow Reduce Reference =\n" << _reference_row_reduce.host_view()
<< "\n\nRow Reduce Computed =\n" << _tensor_row_reduce.host_view() << "\n\n";
}
return equal;
}
Arguments get_arguments() {
return {_tensor_row_reduce.device_data()};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Column Reduce
template <
typename Gemm,
template <class> class ReduceFn,
typename ElementReduce
>
class HostColumnReduce: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using TestBedImpl = typename Base::TestBedImpl;
using ElementCompute = typename Base::ElementCompute;
using ElementOutput = typename Base::ElementD;
using LayoutTagVector = cutlass::layout::PackedVectorLayout;
struct Arguments {
struct OpArgs {
ElementReduce* ptr_col = nullptr;
ElementCompute reduce_identity = 0;
cute::Stride<cute::_1, cute::_0, cute::_0> dRow = {};
} op;
};
private:
cutlass::NumericConverter<ElementReduce, ElementCompute> destination_converter;
cutlass::HostTensor<ElementReduce, LayoutTagVector> _tensor_column_reduce;
cutlass::HostTensor<ElementCompute, LayoutTagVector> _reduce_buffer;
cutlass::HostTensor<ElementReduce, LayoutTagVector> _reference_column_reduce;
int _M;
TestBedImpl impl_;
ReduceFn<ElementCompute> reduce_fn;
public:
HostColumnReduce(){}
template <typename ProblemShapeType>
HostColumnReduce(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false):
Base(check_relative_equality),
impl_(impl) {
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
_M = cute::get<0>(problem_shape_MNKL);
_tensor_column_reduce.resize(cutlass::Coord<1>(_M));
_reference_column_reduce.resize(cutlass::Coord<1>(_M));
_reduce_buffer.resize(cutlass::Coord<1>(_M));
_tensor_column_reduce.sync_device();
}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc, ElementCompute child_0_result) {
auto TensorColReduce = cute::make_tensor(_reduce_buffer.host_data(),
cute::make_layout(cute::make_shape(_M, cute::_1{})));
TensorColReduce(m + m_b, 1) = reduce_fn(TensorColReduce(m + m_b, 1), child_0_result);
return child_0_result;
}
bool compare_reference(std::stringstream& error_ss) {
// Verify the store node
_tensor_column_reduce.sync_host();
auto TensorColReduce = cute::make_tensor(_reference_column_reduce.host_data(),
cute::make_layout(cute::make_shape(_M, cute::_1{})));
auto TensorReduceBuffer = cute::make_tensor(_reduce_buffer.host_data(),
cute::make_layout(cute::make_shape(_M, cute::_1{})));
// Filling the reference tensor with the reduce buffer
for (int m = 0; m < _M; m ++) {
TensorColReduce(m, 1) = destination_converter(TensorReduceBuffer(m, 1));
}
bool equal = this->equality_check(_reference_column_reduce.host_view(), _tensor_column_reduce.host_view());
if (!equal) {
error_ss
<< "\n\nColumn Reduce Reference =\n" << _reference_column_reduce.host_view()
<< "\n\nColumn Reduce Computed =\n" << _tensor_column_reduce.host_view() << "\n\n";
}
return equal;
}
Arguments get_arguments() {
return {_tensor_column_reduce.device_data()};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// EVT - Scalar Reduce
template <
typename Gemm,
template <class> class ReduceFn,
typename ElementReduce
>
class HostScalarReduce: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using TestBedImpl = typename Base::TestBedImpl;
using ElementCompute = typename Base::ElementCompute;
using ElementOutput = typename Base::ElementD;
using LayoutTagVector = cutlass::layout::PackedVectorLayout;
struct Arguments {
struct OpArgs {
ElementReduce* ptr_scalar = nullptr;
ElementCompute reduce_identity = 0;
cute::Stride<cute::_0, cute::_0, cute::_0> dScalar = {};
} op;
};
private:
cutlass::NumericConverter<ElementReduce, ElementCompute> destination_converter;
cutlass::HostTensor<ElementReduce, LayoutTagVector> _tensor_scalar_reduce;
cutlass::HostTensor<ElementCompute, LayoutTagVector> _reduce_buffer;
cutlass::HostTensor<ElementReduce, LayoutTagVector> _reference_scalar_reduce;
ReduceFn<ElementCompute> reduce_fn;
TestBedImpl impl_;
public:
HostScalarReduce(){}
template <typename ProblemShapeType>
HostScalarReduce(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false):
Base(check_relative_equality),
impl_(impl) {
_tensor_scalar_reduce.resize(cutlass::Coord<1>(1));
_reference_scalar_reduce.resize(cutlass::Coord<1>(1));
_reduce_buffer.resize(cutlass::Coord<1>(1));
_tensor_scalar_reduce.sync_device();
}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc, ElementCompute child_0_result) {
auto TensorRowReduce = cute::make_tensor(_reduce_buffer.host_data(),
cute::make_layout(cute::make_shape(cute::_1{})));
TensorRowReduce(0) = reduce_fn(TensorRowReduce(0), child_0_result);
return child_0_result;
}
bool compare_reference(std::stringstream& error_ss) {
// Verify the store node
_tensor_scalar_reduce.sync_host();
auto TensorRowReduce = cute::make_tensor(_reference_scalar_reduce.host_data(),
cute::make_layout(cute::make_shape(cute::_1{})));
auto TensorReduceBuffer = cute::make_tensor(_reduce_buffer.host_data(),
cute::make_layout(cute::make_shape(cute::_1{})));
// Filling the reference tensor with the reduce buffer
TensorRowReduce(0) = destination_converter(TensorReduceBuffer(0));
bool equal = this->equality_check(_reference_scalar_reduce.host_view(), _tensor_scalar_reduce.host_view());
if (!equal) {
error_ss
<< "\n\nScalar Reduce Reference =\n" << _reference_scalar_reduce.host_view()
<< "\n\nScalar Reduce Computed =\n" << _tensor_scalar_reduce.host_view() << "\n\n";
}
return equal;
}
Arguments get_arguments() {
return {_tensor_scalar_reduce.device_data()};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Host EVT wrapper
/// ArgumentPack models the packed argument layout (and its alignment) of the device-side
/// visitor when the number of ops is <= 4; with more ops a cute::tuple is used instead
/// (see HostVisitorBase::Arguments).
template <typename... Ops>
struct ArgumentPack;
template <typename T>
struct ArgumentPack<T> {
T arg;
ArgumentPack(T first):
arg(first) {}
};
template <typename First, typename... Rest>
struct ArgumentPack<First, Rest...> {
First arg;
ArgumentPack<Rest...> rest_args;
ArgumentPack(First first, Rest... rest) :
arg(first), rest_args(rest...) {}
};
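// Editorial note (illustration, not part of the original header): the recursion above
// means ArgumentPack<A, B, C> lays out as
//   { A arg; { B arg; { C arg; } rest_args; } rest_args; }
// which is intended to mirror the packed layout of the device-side visitor arguments,
// so that Testbed3xEVT::run below can memcpy the host-built arguments over
// `arguments.epilogue.thread`.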
/// Base class for Host Visitor
template <typename Gemm, class... Ops>
struct HostVisitorBase: public HostEVTNodeBase<Gemm> {
public:
using Base = HostEVTNodeBase<Gemm>;
using ElementCompute = typename Base::ElementCompute;
using Arguments_struct = ArgumentPack<typename Ops::Arguments...>;
using Arguments_tuple = cute::tuple<typename Ops::Arguments...>;
constexpr static int Rm1 = sizeof...(Ops);
constexpr static bool cond = Rm1 > 4;
using Arguments = cute::conditional_t<cond, Arguments_tuple, Arguments_struct>;
std::tuple<Ops...> ops;
HostVisitorBase(){}
template<typename ProblemShapeType, typename TestBedImpl>
HostVisitorBase(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
:Base(check_relative_equality),
ops(test::gemm::device::tapply(std::tuple<Ops...>{},
[&] (auto&& op) {
using Op = cute::remove_cvref_t<decltype(op)>;
return Op(problem_size, impl, check_relative_equality);
},
[] (auto&&... _ops) {
return std::make_tuple(_ops...);
},
cute::make_seq<Rm1>{}
)){ }
bool compare_reference(std::stringstream& error_ss) {
return cute::detail::tapply(ops,
[&](auto& op) {
return op.compare_reference(error_ss);
},
[&] (auto&&... inputs) {
return arrayAnd(inputs...);
},
cute::make_seq<Rm1>{}
);
}
void* get_tensor_C_ptr() {
return cute::detail::tapply(ops,
[&](auto& op) {
return op.get_tensor_C_ptr();
},
[&] (auto&&... inputs) {
return findNonNullPtr(inputs...);
},
cute::make_seq<Rm1>{}
);
}
void* get_tensor_D_ptr() {
return cute::detail::tapply(ops,
[&](auto& op) {
return op.get_tensor_D_ptr();
},
[&] (auto&&... inputs) {
return findNonNullPtr(inputs...);
},
cute::make_seq<Rm1>{}
);
}
Arguments get_arguments() {
return test::gemm::device::tapply(ops,
[&](auto& op) {
return op.get_arguments();
},
[&] (auto&&... args) {
if constexpr (Rm1 > 4) {
return cute::make_tuple(args...);
} else {
return Arguments(args...);
}
},
cute::make_seq<Rm1>{}
);
}
bool arrayAnd(bool passed) {
return passed;
}
template <typename... Args>
bool arrayAnd(bool first_passed, Args... passed) {
if (first_passed) {
return arrayAnd(passed...);
}
return first_passed;
}
};
/// Tree-struct visitor
template <class NodeOp, class... ChildOps>
struct HostTreeVisitor: public HostVisitorBase<typename NodeOp::Base::Gemm, ChildOps..., NodeOp> {
public:
using Gemm = typename NodeOp::Base::Gemm;
using Base = HostVisitorBase<Gemm, ChildOps..., NodeOp>;
using ElementCompute = typename Base::ElementCompute;
using Arguments = typename Base::Arguments;
constexpr static int Rm1 = sizeof...(ChildOps);
HostTreeVisitor(){}
template<typename ProblemShapeType, typename TestBedImpl>
HostTreeVisitor(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
:Base(problem_size, impl, check_relative_equality){ }
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
return cute::detail::tapply(this->ops,
[&] (auto& op) {
return op.visit(m, n, l, m_b, n_b, acc);
},
[&] (auto&&... frg_inputs) {
return std::get<Rm1>(this->ops).visit(m, n, l, m_b, n_b, acc, frg_inputs...);
},
cute::make_seq<Rm1>{}
);
}
};
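// Editorial sketch (hypothetical composition, not part of the original header): children
// are listed after the node op in the template parameter list and are visited first;
// their results are then fed to the node op. For example, a host reference computing
// alpha * acc with alpha = 2 could be composed as:
template <typename Gemm>
using HostAlphaScaleExample = HostTreeVisitor<
    HostCompute<Gemm, cutlass::multiplies>,   // node op: multiplies its child results
    HostScalarBroadcast<Gemm, 2>,             // child 0: broadcasts the scalar 2
    HostAccumulator<Gemm>>;                   // child 1: forwards the accumulator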
/// General Graph visitor
template <class Gemm, class EdgeTuple, class... Ops>
struct HostTopoVisitor: public HostVisitorBase<Gemm, Ops...> {
public:
using Base = HostVisitorBase<Gemm, Ops...>;
using ElementCompute = typename Base::ElementCompute;
constexpr static int Rm1 = Base::Rm1;
using Arguments = typename Base::Arguments;
private:
ElementCompute frg_outputs[Rm1];
public:
HostTopoVisitor(){}
template<typename ProblemShapeType, typename TestBedImpl>
HostTopoVisitor(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
:Base(problem_size, impl, check_relative_equality) { }
template<class ElementAccumulator, int I>
ElementCompute visit_(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
frg_outputs[I] = cute::transform_apply(cute::get<I>(EdgeTuple{}),
[&] (auto&& _E) {
constexpr int e = cute::remove_cvref_t<decltype(_E)>::value;
return frg_outputs[e];
},
[&] (auto const&... frg_inputs) {
ElementCompute res = std::get<I>(this->ops).visit(m, n, l, m_b, n_b, acc, frg_inputs...);
return res;
}
);
if constexpr (I < Rm1 - 1) {
return visit_<ElementAccumulator, I+1>(m, n, l, m_b, n_b, acc);
} else {
return frg_outputs[I];
}
}
template <class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
return visit_<ElementAccumulator, 0>(m, n, l, m_b, n_b, acc);
}
};
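// Editorial sketch (hypothetical composition, not part of the original header): EdgeTuple
// lists, for each op, the indices of the earlier ops whose results feed it. A three-node
// DAG where op 2 multiplies the accumulator (op 0) by a broadcast scalar (op 1):
template <typename Gemm>
using HostTopoExample = HostTopoVisitor<
    Gemm,
    cute::tuple<cute::seq<>, cute::seq<>, cute::seq<0, 1>>,  // op 2 consumes ops 0 and 1
    HostAccumulator<Gemm>,                                   // op 0
    HostScalarBroadcast<Gemm, 2>,                            // op 1
    HostCompute<Gemm, cutlass::multiplies>>;                 // op 2 (also the final output)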
/// SplitTree visitor
template <class Gemm, class InputTree, class OutputTree, class... AuxOutTrees>
struct HostSplitTreeVisitor: public HostVisitorBase<Gemm, InputTree, AuxOutTrees..., OutputTree> {
public:
using Base = HostVisitorBase<Gemm, InputTree, AuxOutTrees..., OutputTree>;
using ElementCompute = typename Base::ElementCompute;
using Arguments = typename Base::Arguments;
constexpr static int Rm2 = sizeof...(AuxOutTrees);
private:
ElementCompute frg_input;
public:
HostSplitTreeVisitor(){}
template<typename ProblemShapeType, typename TestBedImpl>
HostSplitTreeVisitor(ProblemShapeType problem_size, TestBedImpl impl, bool check_relative_equality=false)
:Base(problem_size, impl, check_relative_equality) { }
template<class ElementAccumulator, int I>
void visitAux(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator frag) {
std::get<I+1>(this->ops).visit(m, n, l, m_b, n_b, frag);
if constexpr (I < Rm2 - 1) {
return visitAux<ElementAccumulator, I+1>(m, n, l, m_b, n_b, frag);
} else {
return;
}
}
template<class ElementAccumulator>
ElementCompute visit(
int64_t m, int64_t n, int64_t l, int m_b, int n_b,
ElementAccumulator acc) {
/// Compute the input tree
frg_input = std::get<0>(this->ops).visit(m, n, l, m_b, n_b, acc);
/// Compute the aux out tree
visitAux<ElementAccumulator, 0>(m, n, l, m_b, n_b, frg_input);
/// Visit the output tree
return std::get<Rm2+1>(this->ops).visit(m, n, l, m_b, n_b, frg_input);
}
};
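// Editorial note (not part of the original header): the op order mirrors the device-side
// split-tree visitor -- ops[0] is the input tree, ops[1..Rm2] are the auxiliary output
// trees (visited only for their side effects), and ops[Rm2+1] is the output tree, which
// receives the input tree's result in place of the raw accumulator.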
/// Universal testbed for EVT
template <class Gemm, typename EVT>
class Testbed3xEVT {
public:
// The EVT Module to test
using EVTModule = typename EVT::EVTModule;
using TestBedImpl = typename detail::TestbedImpl<Gemm, cutlass::epilogue::thread::Identity, true>;
using Kernel = typename Gemm::GemmKernel;
using Epilogue = typename Gemm::GemmKernel::CollectiveEpilogue;
using ElementAccumulator = typename Kernel::ElementAccumulator;
using ElementC = typename Kernel::ElementC;
using ElementD = typename Kernel::ElementD;
using ProblemShapeType = typename Kernel::ProblemShape;
using LayoutTagA = typename TestBedImpl::LayoutTagA;
using LayoutTagB = typename TestBedImpl::LayoutTagB;
using LayoutTagC = typename TestBedImpl::LayoutTagC;
using LayoutTagD = typename TestBedImpl::LayoutTagD;
//
// Methods
//
Testbed3xEVT(
bool check_relative_equality_,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = TestBedImpl::kDefaultSeed
) :
impl_((check_relative_equality_ ? CheckEquality::RELATIVE : CheckEquality::EXACT), ScalarLoc::ON_DEVICE, VectorBeta::ENABLED,
init_A_, init_B_, init_C_, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform, seed_),
check_relative_equality(check_relative_equality_) { }
Testbed3xEVT(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = TestBedImpl::kDefaultSeed
) :
impl_(CheckEquality::EXACT, ScalarLoc::ON_DEVICE, VectorBeta::ENABLED,
init_A_, init_B_, init_C_, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform, seed_),
check_relative_equality(false) { }
Testbed3xEVT(
typename LayoutTagA::Stride stride_factor_A_,
typename LayoutTagB::Stride stride_factor_B_,
typename LayoutTagC::Stride stride_factor_C_,
typename LayoutTagD::Stride stride_factor_D_,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = TestBedImpl::kDefaultSeed
) :
impl_(stride_factor_A_, stride_factor_B_, stride_factor_C_, stride_factor_D_,
CheckEquality::EXACT, ScalarLoc::ON_DEVICE, VectorBeta::ENABLED,
init_A_, init_B_, init_C_, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform, seed_),
check_relative_equality(false) { }
/// Initializes data structures
void initialize(ProblemShapeType problem_size) {
//
// Allocate the GEMM workspace for A/B tensor
//
impl_.initialize(problem_size);
}
// Detail Implementation
TestBedImpl impl_;
// Whether to use relative equality checks
bool check_relative_equality;
bool verify(ProblemShapeType problem_size, EVTModule& host_reference) {
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
auto M = cute::get<0>(problem_shape_MNKL);
auto N = cute::get<1>(problem_shape_MNKL);
auto K = cute::get<2>(problem_shape_MNKL);
auto L = cute::get<3>(problem_shape_MNKL);
auto A = cute::make_tensor(impl_.collective_mma_inputs.tensor_A.host_data(),
cute::make_layout(cute::make_shape(M, K, L), impl_.collective_mma_inputs.stride_a));
auto B = cute::make_tensor(impl_.collective_mma_inputs.tensor_B.host_data(),
cute::make_layout(cute::make_shape(N, K, L), impl_.collective_mma_inputs.stride_b));
auto LayoutD = cute::make_layout(cute::make_shape(M, N, L), impl_.collective_epilogue.stride_d);
cutlass::reference::host::GettMainloopParams<ElementAccumulator, decltype(A), decltype(B)> mainloop_params{A, B};
/// Reference Kernel
static int constexpr kBlockM = 64;
static int constexpr kBlockN = 64;
#if defined(_OPENMP)
#pragma omp parallel for collapse(3)
#endif
for (int64_t l = 0; l < cute::size<2>(mainloop_params.A.layout()); ++l) {
for (int64_t m = 0; m < cute::size<0>(mainloop_params.A.layout()); m += kBlockM) {
for (int64_t n = 0; n < cute::size<0>(mainloop_params.B.layout()); n += kBlockN) {
ElementAccumulator acc[kBlockM][kBlockN];
gett_mainloop(mainloop_params, m, n, l, acc);
/// Epilogue EVT
for (int n_b = 0; n_b < kBlockN; ++n_b) {
for (int m_b = 0; m_b < kBlockM; ++m_b) {
if (m + m_b < cute::size<0>(LayoutD) && n + n_b < cute::size<1>(LayoutD)) {
host_reference.visit(m, n, l, m_b, n_b, acc[m_b][n_b]);
}
}
}
}
}
}
std::stringstream error_ss;
bool passed = host_reference.compare_reference(error_ss);
if (!passed) {
std::stringstream fname;
fname << "error_Gemm_device_"
<< M << "x" << N << "x" << K << "x" << L << "_"
<< cute::get<0>(typename Gemm::GemmKernel::TileShape{}) << "_"
<< cute::get<1>(typename Gemm::GemmKernel::TileShape{}) << "_"
<< cute::get<2>(typename Gemm::GemmKernel::TileShape{}) << ".txt";
std::ofstream file(fname.str());
file
<< "problem: " << ' ' << M << "x" << N << "x" << K
<< ", Batch count = " << L << "\n\n";
file
<< "A =\n" << impl_.collective_mma_inputs.tensor_A.host_view()
<< "\nB =\n" << impl_.collective_mma_inputs.tensor_B.host_view()
<< "\nC =\n" << impl_.collective_epilogue.tensor_C.host_view() << "\n\n";
file << error_ss.str();
}
return passed;
}
bool run(
ProblemShapeType problem_size,
bool profiling = false,
int iterations = 20,
int splits = 1) {
// Fail test if insufficient CUDA device
if (!impl_.sufficient()) {
std::cout << "Test failed due to insufficient CUDA device." << std::endl;
return false;
}
//
// Initialize the Gemm operator
//
typename Gemm::Arguments arguments;
cutlass::KernelHardwareInfo hw_info;
hw_info.device_id = 0;
if (not profiling) {
impl_.sm_count = std::min(impl_.MaxSmCount, cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id));
hw_info.sm_count = impl_.sm_count;
}
else {
impl_.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
hw_info.sm_count = impl_.sm_count;
}
typename Gemm::GemmKernel::TileScheduler::Arguments scheduler_args;
if constexpr (cute::is_same_v<typename Gemm::GemmKernel::TileSchedulerTag, cutlass::gemm::StreamKScheduler>) {
scheduler_args = { splits };
}
/// Initializes data structures
/// A/B/C/D Tensor
initialize(problem_size);
/// Initialize the epilogue arguments
EVTModule host_reference(problem_size, impl_, check_relative_equality);
arguments = typename Gemm::Arguments{
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size,
{
impl_.collective_mma_inputs.tensor_A.device_data(), impl_.collective_mma_inputs.stride_a,
impl_.collective_mma_inputs.tensor_B.device_data(), impl_.collective_mma_inputs.stride_b
},
{ // Epilogue arguments
{}, // thread
static_cast<ElementC*>(host_reference.get_tensor_C_ptr()),
impl_.collective_epilogue.stride_c,
static_cast<ElementD*>(host_reference.get_tensor_D_ptr()),
impl_.collective_epilogue.stride_d
}, // Epilogue arguments end
hw_info,
scheduler_args
};
// Filling in the thread arguments
typename EVTModule::Arguments epilogue_args = host_reference.get_arguments();
std::memcpy(&arguments.epilogue.thread, &epilogue_args.arg, sizeof(epilogue_args.arg));
Gemm gemm_op;
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm_op.can_implement(arguments);
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return true;
}
//
// Run the GEMM
//
if (profiling) {
return impl_.profile(problem_size, iterations, gemm_op, arguments, workspace);
}
else {
cudaError_t result;
      status = gemm_op.initialize(arguments, workspace.get());
      EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
      status = gemm_op.run();
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
EXPECT_EQ(result, cudaSuccess) << "Error at Kernel Sync.";
return false;
}
}
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, host_reference);
if (!passed) {
std::cout << "Error : Failed \n";
}
return passed;
}
};
template <typename Gemm, typename EVT>
bool TestAllEVT(bool check_relative_equality=false) {
using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;
int max_alignment = std::max(Gemm::kAlignmentA, Gemm::kAlignmentB);
std::vector<int> problem_size_m = {max_alignment, 512 - 3 * max_alignment};
std::vector<int> problem_size_n = {max_alignment, 512 - 2 * max_alignment};
if constexpr (cute::is_same_v<typename Gemm::GemmKernel::DispatchPolicy::Schedule,
cutlass::gemm::KernelTmaWarpSpecializedPingpong>) {
problem_size_m.push_back(768);
problem_size_n.push_back(768);
}
constexpr int Stages = Gemm::GemmKernel::DispatchPolicy::Stages;
constexpr int TileShapeK = cute::size<2>(typename Gemm::GemmKernel::TileShape{});
std::vector<int> problem_size_k = {max_alignment, TileShapeK * (Stages + 1) - max_alignment};
Testbed3xEVT<Gemm, EVT> testbed(check_relative_equality);
bool passed = true;
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
ProblemShapeType problem_size;
if constexpr (cute::rank(ProblemShapeType{}) == 4) {
problem_size = ProblemShapeType{m, n, k, /* l */ 1};
}
else {
problem_size = ProblemShapeType{m, n, k};
}
passed = testbed.run(problem_size);
if (!passed) {
return false;
}
}
}
}
// if we do support batched GEMM, just run one test on it to save on test time
if constexpr (cute::rank(ProblemShapeType{}) == 4) {
auto problem_size = ProblemShapeType{256 + max_alignment, 256 + max_alignment, 160 + max_alignment, /* l */ 3};
passed = testbed.run(
problem_size
);
if (!passed) {
return false;
}
}
return passed;
}
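// Editorial usage sketch (hypothetical names, not part of the original header): a unit
// test typically pairs a device-side EVT definition (exposing ::EVTModule, the host
// reference built from the nodes above) with a Gemm type and calls:
//
//   TEST(SM90_Device_Gemm_Example, EVT_Example) {
//     EXPECT_TRUE((test::gemm::device::TestAllEVT<SomeGemm, SomeEVT>(
//         /* check_relative_equality = */ true)));
//   }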
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/gemm_testbed_3x_evt.hpp/0 | {
"file_path": "test/unit/gemm/device/gemm_testbed_3x_evt.hpp",
"repo_id": "test",
"token_count": 18157
} | 51 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#pragma once
#include <iostream>
#include <sstream>
#include <stdexcept>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "testbed.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
struct TestbedComplex : public Testbed<Gemm> {
using Base = Testbed<Gemm>;
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute;
//
// Methods
//
TestbedComplex(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
Base(init_A_, init_B_, init_C_, seed_) { }
  /// Verifies the result against a host-side complex-valued GEMM reference
bool verify(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
//
// Verify
//
cutlass::reference::host::GemmComplex(
problem_size,
alpha,
this->tensor_A.host_ref(),
Gemm::kTransformA,
this->tensor_B.host_ref(),
Gemm::kTransformB,
beta,
this->tensor_C.host_ref(),
this->reference_D.host_ref(),
ElementAccumulator(0)
);
return this->compare_reference(problem_size, alpha, beta);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmCoord problem_size,
int split_k_slices = 1,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
//
// Initialize workspace
//
this->initialize(problem_size);
//
// Initialize the GEMM operator
//
typename Gemm::Arguments arguments{
problem_size,
this->tensor_A.device_ref(),
this->tensor_B.device_ref(),
this->tensor_C.device_ref(),
this->tensor_D.device_ref(),
{alpha, beta},
split_k_slices
};
Gemm gemm_op;
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Run the GEMM
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, alpha, beta);
if (!passed) {
std::cout << "Error with split_k_slices = " << split_k_slices << ", alpha: " << alpha << std::endl;
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
bool TestAllGemmComplex() {
bool passed = true;
using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute;
int const kMinimumOperandElementSize =
std::min(
int(cutlass::sizeof_bits<typename Gemm::ElementA>::value),
int(cutlass::sizeof_bits<typename Gemm::ElementB>::value));
int const kAlignment =
cutlass::platform::is_same<
typename Gemm::OperatorClass,
cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize;
int problem_size_m[] = {
kAlignment, 512 - 3*kAlignment
};
int problem_size_n[] = {
kAlignment, 512 - 2*kAlignment
};
int problem_size_k[] = {
kAlignment, 128 - kAlignment
};
int split_k_slices[] = {
1, 2, 3
};
double problem_alpha[] = {
1
};
double problem_beta[] = {
2.0
};
TestbedComplex<Gemm> testbed;
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
for (int split_k : split_k_slices) {
if (!Gemm::kSplitKSerial && split_k > 1) {
continue;
}
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
cutlass::gemm::GemmCoord problem_size(m, n, k);
passed = testbed.run(
problem_size,
split_k,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
if (!passed) {
return false;
}
}
}
}
}
}
}
return passed;
}
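// Editorial usage sketch (hypothetical names, not part of the original header):
//
//   TEST(SM80_Device_GemmComplex_Example, 64x64x16) {
//     using Gemm = /* a device-level CUTLASS GEMM configured for complex operands */;
//     EXPECT_TRUE(test::gemm::device::TestAllGemmComplex<Gemm>());
//   }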
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_complex.h/0 | {
"file_path": "test/unit/gemm/device/testbed_complex.h",
"repo_id": "test",
"token_count": 2998
} | 52 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "testbed_utils.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm, bool Relu = false>
struct TestbedUniversal {
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<typename Gemm::ElementA, typename Gemm::LayoutA> tensor_A;
cutlass::HostTensor<typename Gemm::ElementB, typename Gemm::LayoutB> tensor_B;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_C;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_D;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> reference_D;
//
// Methods
//
TestbedUniversal(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value;
bool is_unsigned_int = std::numeric_limits<Element>::is_integer && !std::numeric_limits<Element>::is_signed;
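      // Note: the narrow ranges chosen below keep products and partial sums small
      // enough that the host reference and the device kernel agree bit-for-bit (no
      // rounding differences for floating-point types, no saturation for integers).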
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = is_unsigned_int ? 2 : 1;
scope_min = is_unsigned_int ? 0 : -1;
} else if (bits_output == 16) {
scope_max = is_unsigned_int ? 10 : 5;
scope_min = is_unsigned_int ? 0 : -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::gemm::GemmCoord problem_size) {
//
// Allocate the GEMM workspace
//
tensor_A.resize(problem_size.mk());
tensor_B.resize(problem_size.kn());
tensor_C.resize(problem_size.mn());
tensor_D.resize(problem_size.mn());
reference_D.resize(problem_size.mn(), false);
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));
    // Random initialization may produce an all-zero tensor, so force a non-zero
    // value into the upper-left corner of each operand.
cutlass::Coord<2> origin(0);
tensor_A.host_view().at(origin) = typename Gemm::ElementA(1);
tensor_B.host_view().at(origin) = typename Gemm::ElementB(1);
tensor_C.host_view().at(origin) = typename Gemm::ElementC(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
}
  /// Compares the device result against the host-computed reference and writes both to a file if they differ
bool compare_reference(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view());
EXPECT_TRUE(passed) << " mismatched reference";
if (!passed) {
/*
std::stringstream fname;
fname << "error_Gemm_device_"
<< problem_size.m() << "x"
<< problem_size.n() << "x"
<< problem_size.k() << "_"
<< Gemm::ThreadblockShape::kM << "x"
<< Gemm::ThreadblockShape::kN << "x"
<< Gemm::ThreadblockShape::kK << "_"
<< Gemm::WarpShape::kM << "x"
<< Gemm::WarpShape::kN << "x"
<< Gemm::WarpShape::kK << ".txt";
std::ofstream file(fname.str());
*/
std::ofstream file("testbed_universal_errors.txt");
file
<< "problem: " << problem_size
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << tensor_A.host_view()
<< "\nB =\n" << tensor_B.host_view()
<< "\nC =\n" << tensor_C.host_view()
<< "\n\nReference =\n" << reference_D.host_view()
<< "\nComputed =\n" << tensor_D.host_view();
}
return passed;
}
  /// Verifies the device result against a host reference GEMM
bool verify(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
//
// Verify
//
cutlass::reference::host::GemmComplex<
typename Gemm::ElementA, typename Gemm::LayoutA,
typename Gemm::ElementB, typename Gemm::LayoutB,
typename Gemm::ElementC, typename Gemm::LayoutC,
ElementCompute, ElementAccumulator
>(
problem_size,
alpha,
tensor_A.host_ref(),
Gemm::kTransformA,
tensor_B.host_ref(),
Gemm::kTransformB,
beta,
tensor_C.host_ref(),
reference_D.host_ref(),
ElementAccumulator(0)
);
if (Relu) {
for (int i = 0; i < problem_size.m(); ++i) {
for (int j = 0; j < problem_size.n(); ++j) {
reference_D.at(cutlass::MatrixCoord(i, j)) =
((ElementCompute)reference_D.at(cutlass::MatrixCoord(i, j)) < (ElementCompute)0)
? (typename Gemm::ElementC)0
: reference_D.at(cutlass::MatrixCoord(i, j));
}
}
}
return compare_reference(problem_size, alpha, beta);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmUniversalMode mode,
cutlass::gemm::GemmCoord problem_size,
int batch_count = 1,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0))
{
/*
std::cout << "\n-----------------------\n";
std::cout << "mode: " << (int) mode << "\n";
std::cout << "problem size: " << problem_size << "\n";
std::cout << "batch_count: " << batch_count << "\n";
std::cout << "alpha: " << alpha << "\n";
std::cout << "beta: " << beta << "\n";
std::cout << "-----------------------\n\n";
*/
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
this->initialize(problem_size);
//
// Initialize the GEMM operator
//
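    // GemmUniversal arguments are positional: mode, problem size, batch count,
    // epilogue parameters {alpha, beta}, the A/B/C/D device pointers, the four
    // batch strides (in elements), and finally the leading dimension of each
    // tensor taken from its layout.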
typename Gemm::Arguments arguments{
mode,
problem_size,
batch_count,
{alpha, beta},
tensor_A.device_data(),
tensor_B.device_data(),
tensor_C.device_data(),
tensor_D.device_data(),
problem_size.m() * problem_size.k(),
problem_size.n() * problem_size.k(),
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
tensor_A.layout().stride(0),
tensor_B.layout().stride(0),
tensor_C.layout().stride(0),
tensor_D.layout().stride(0)
};
Gemm gemm_op;
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Run the GEMM
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, alpha, beta);
if (!passed) {
std::cout << "Failed with batch_count/split_k_slices = " << batch_count << std::endl;
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm, bool Relu = false>
bool TestGemmUniversal(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmUniversalMode mode,
int batch_count,
double alpha = 1.0,
double beta = 2.0) {
bool passed = true;
TestbedUniversal<Gemm, Relu> testbed;
using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute;
passed = testbed.run(
mode,
problem_size,
batch_count,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
return passed;
}
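//
// Illustrative call (sketch; "GemmKernel" stands in for a concrete
// cutlass::gemm::device::GemmUniversal instantiation defined by the test):
//
//   cutlass::gemm::GemmCoord problem(128, 128, 512);
//   bool ok = test::gemm::device::TestGemmUniversal<GemmKernel>(
//       problem, cutlass::gemm::GemmUniversalMode::kGemm, /*batch_count=*/1, 1.0, 2.0);
//   EXPECT_TRUE(ok);
//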
template <typename Gemm, bool Relu = false>
bool TestAllGemmUniversal() {
bool passed = true;
int const kMinimumOperandElementSize =
std::min(
int(cutlass::sizeof_bits<typename Gemm::ElementA>::value),
int(cutlass::sizeof_bits<typename Gemm::ElementB>::value));
int const kAlignment = cutlass::platform::is_same<
typename Gemm::OperatorClass,
cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize;
// int8_t gemm alignment constraints
int const kAlignmentM = cutlass::platform::is_same<typename Gemm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Gemm::ElementA, int8_t>::value &&
cutlass::platform::is_same<typename Gemm::LayoutA, cutlass::layout::ColumnMajor>::value ? 4 : kAlignment;
int const kAlignmentN = cutlass::platform::is_same<typename Gemm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Gemm::ElementB, int8_t>::value &&
cutlass::platform::is_same<typename Gemm::LayoutB, cutlass::layout::RowMajor>::value ? 4 : kAlignment;
int const kAlignmentK = cutlass::platform::is_same<typename Gemm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Gemm::ElementA, int8_t>::value &&
cutlass::platform::is_same<typename Gemm::ElementB, int8_t>::value &&
(cutlass::platform::is_same<typename Gemm::LayoutA, cutlass::layout::RowMajor>::value ||
cutlass::platform::is_same<typename Gemm::LayoutB, cutlass::layout::ColumnMajor>::value) ? 4 : kAlignment;
cutlass::gemm::GemmUniversalMode modes[] = {
cutlass::gemm::GemmUniversalMode::kGemm,
};
int problem_size_m[] = {
kAlignmentM, 512 - 3*kAlignmentM
};
int problem_size_n[] = {
kAlignmentN, 512 - 2*kAlignmentN
};
int problem_size_k[] = {
kAlignmentK,
Gemm::ThreadblockShape::kK * Gemm::kStages - kAlignmentK,
Gemm::ThreadblockShape::kK * Gemm::kStages * 3 - kAlignmentK
};
  int batch_counts[] = { // may be interpreted as batch count or split-K slices
1, 2, 3, 5, 7
};
double problem_alpha[] = {
1
};
double problem_beta[] = {
2.0
};
using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute;
for (cutlass::gemm::GemmUniversalMode mode : modes) {
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
for (int batch_count : batch_counts) {
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
if (mode == cutlass::gemm::GemmUniversalMode::kGemm ||
mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) {
// skip very small K problems
if (k / batch_count < 2 * Gemm::ThreadblockShape::kK) {
continue;
}
}
cutlass::gemm::GemmCoord problem_size(m, n, k);
TestbedUniversal<Gemm, Relu> testbed;
passed = testbed.run(
mode,
problem_size,
batch_count,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
if (!passed) {
return false;
}
}
}
}
}
}
}
}
/*
// large problem with high coverage
for (int split_k_slices = 1; split_k_slices <= 3; ++split_k_slices) {
TestbedUniversal<Gemm> testbed;
cutlass::gemm::GemmCoord problem_size(72, 56, 8192);
passed = testbed.run(
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size,
split_k_slices,
cutlass::from_real<ElementCompute>(1.0),
cutlass::from_real<ElementCompute>(2.0)
);
if (!passed) {
break;
}
}
*/
return passed;
}
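//
// Typical usage from a test translation unit (sketch; the Gemm alias is assumed to
// be a device-level GEMM instantiation declared in the including .cu file):
//
//   TEST(SM80_Device_GemmUniversal_example, 128x128x32_64x64x32) {
//     EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal<Gemm>());
//   }
//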
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_universal.h/0 | {
"file_path": "test/unit/gemm/device/testbed_universal.h",
"repo_id": "test",
"token_count": 7089
} | 53 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Unit tests for warp-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/core_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
#include "testbed.h"
#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
////////////////////////////////////////////////////////////////////////////////
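// Note on naming: each test below encodes
// <overall problem shape>_<warp-level Shape>_<InstructionShape> in its name; the
// second template argument of Testbed<> gives the overall extents the warp-level
// tile is iterated over (inferred from the shapes used in this file).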
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x32_64x64x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x32_64x32x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x32_32x32x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x32_32x16x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 16, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x32_16x16x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<16, 16, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x64_64x32x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x64_32x32x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x64_32x16x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f16, 128x128x64_16x16x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<16, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x16_64x64x16_16x8x8) {
using Shape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x16_64x32x16_16x8x8) {
using Shape = cutlass::gemm::GemmShape<64, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x16_32x32x16_16x8x8) {
using Shape = cutlass::gemm::GemmShape<32, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x16_32x16x16_16x8x8) {
using Shape = cutlass::gemm::GemmShape<32, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x16_16x16x16_16x8x8) {
using Shape = cutlass::gemm::GemmShape<16, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 16>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x32_64x64x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x32_64x32x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x32_32x32x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x32_32x16x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<32, 16, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_tf32, 128x128x32_16x16x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<16, 16, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
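// The "congruous" tests below cover the NT arrangement (column-major A, row-major
// B), where the contiguous dimension of each operand is the non-K dimension; the
// "crosswise" tests above cover the TN arrangement (row-major A, column-major B),
// where K is the contiguous dimension.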
TEST(SM80_warp_gemm_tensor_op_congruous_f16, 128x128x32_64x64x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_f16, 128x128x32_32x32x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_f16, 16x16x32_16x16x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<16, 16, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 16>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 16>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<16, 16, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_f16, 32x32x32_32x32x32_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 32, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_f16, 128x128x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_f16, 128x128x64_32x32x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_tf32, 128x128x16_64x64x16_16x8x8) {
using Shape = cutlass::gemm::GemmShape<64, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_tf32, 128x128x16_32x32x16_16x8x8) {
using Shape = cutlass::gemm::GemmShape<32, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_tf32, 128x128x32_64x64x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_tf32, 128x128x32_32x32x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
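// The two tests below pass float operands with cutlass::arch::OpMultiplyAdd and use
// TransformTestbed, which exercises the in-register rounding of float inputs to
// tf32 ahead of the 16x8x8 Tensor Core MMA (hence "tf32_round" in the test names).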
TEST(SM80_warp_gemm_tensor_op_tn, tf32_round_128x128x32_64x64x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = float;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::TransformTestbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
TEST(SM80_warp_gemm_tensor_op_nt, tf32_round_128x128x32_64x64x32_16x8x8) {
using Shape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = float;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::TransformTestbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 32> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_16x16x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<16, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_32x16x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_32x32x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_64x32x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_16x16x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<16, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_32x16x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_32x32x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_64x32x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x64_64x64x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 32>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x64_64x64x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x64_64x32x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x64_32x32x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x64_32x16x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x64_16x16x64_16x8x32) {
using Shape = cutlass::gemm::GemmShape<16, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x128_64x64x128_16x8x32) {
using Shape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x128_64x32x128_16x8x32) {
using Shape = cutlass::gemm::GemmShape<64, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x128_32x32x128_16x8x32) {
using Shape = cutlass::gemm::GemmShape<32, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x128_32x16x128_16x8x32) {
using Shape = cutlass::gemm::GemmShape<32, 16, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i8, 128x128x128_16x16x128_16x8x32) {
using Shape = cutlass::gemm::GemmShape<16, 16, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Element = int8_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
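// The int4 (s4) variants below follow the same pattern with the 16x8x64 instruction
// shape; later groups widen the K extent (and crosswise size) to 128 and 256 elements.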
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x128_64x64x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x128_64x32x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<64, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x128_32x32x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<32, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x128_32x16x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<32, 16, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x128_16x16x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<16, 16, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 128>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x256_64x64x256_16x8x64) {
using Shape = cutlass::gemm::GemmShape<64, 64, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 256> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x256_64x32x256_16x8x64) {
using Shape = cutlass::gemm::GemmShape<64, 32, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 256> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x256_32x32x256_16x8x64) {
using Shape = cutlass::gemm::GemmShape<32, 32, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 256> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x256_32x16x256_16x8x64) {
using Shape = cutlass::gemm::GemmShape<32, 16, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 256> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_i4, 128x128x256_16x16x256_16x8x64) {
using Shape = cutlass::gemm::GemmShape<16, 16, 256>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 256>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 256> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
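// The binary (b1) tests use 1-bit operands with the 16x8x256 instruction shape and
// cutlass::arch::OpMultiplyAdd (no saturation), covering K extents of 512 and 1024 bits.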
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x512_64x64x512_16x8x256) {
using Shape = cutlass::gemm::GemmShape<64, 64, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 512> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x512_64x32x512_16x8x256) {
using Shape = cutlass::gemm::GemmShape<64, 32, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 512> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x512_32x32x512_16x8x256) {
using Shape = cutlass::gemm::GemmShape<32, 32, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 512> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x512_32x16x512_16x8x256) {
using Shape = cutlass::gemm::GemmShape<32, 16, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 512> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x512_16x16x512_16x8x256) {
using Shape = cutlass::gemm::GemmShape<16, 16, 512>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 512>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 512> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x1024_64x64x1024_16x8x256) {
using Shape = cutlass::gemm::GemmShape<64, 64, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 1024> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x1024_64x32x1024_16x8x256) {
using Shape = cutlass::gemm::GemmShape<64, 32, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 1024> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x1024_32x32x1024_16x8x256) {
using Shape = cutlass::gemm::GemmShape<32, 32, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 1024> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x1024_32x16x1024_16x8x256) {
using Shape = cutlass::gemm::GemmShape<32, 16, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 1024> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_b1, 128x128x1024_16x16x1024_16x8x256) {
using Shape = cutlass::gemm::GemmShape<16, 16, 1024>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 256>;
using Element = cutlass::uint1b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 1024>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 1024> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
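// The f64 "congruous" tests exercise the 8x8x4 double-precision instruction with
// column-major A / row-major B operands staged through the Congruous64b shared-memory
// layouts.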
TEST(SM80_warp_gemm_tensor_op_congruous_f64, 16x16x4_16x16x4_8x8x4) {
using Shape = cutlass::gemm::GemmShape<16, 16, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<16, 16, 4> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_f64, 32x16x4_32x16x4_8x8x4) {
using Shape = cutlass::gemm::GemmShape<32, 16, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 16, 4> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_f64, 32x32x4_32x32x4_8x8x4) {
using Shape = cutlass::gemm::GemmShape<32, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 32, 4> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_congruous_f64, 32x64x4_32x64x4_8x8x4) {
using Shape = cutlass::gemm::GemmShape<32, 64, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 64, 4> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
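// The f64 "crosswise" tests cover the transposed arrangement: row-major A /
// column-major B staged through the 64bCrosswise shared-memory layouts, with K = 16.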
TEST(SM80_warp_gemm_tensor_op_crosswise_f64, 16x16x16_16x16x16_8x8x4) {
using Shape = cutlass::gemm::GemmShape<16, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<16, 16, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f64, 32x32x16_32x32x16_8x8x4) {
using Shape = cutlass::gemm::GemmShape<32, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 32, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f64, 64x32x16_64x32x16_8x8x4) {
using Shape = cutlass::gemm::GemmShape<64, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<64, 32, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_crosswise_f64, 32x64x16_32x64x16_8x8x4) {
using Shape = cutlass::gemm::GemmShape<32, 64, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 64, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
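// Note: despite the "interleaved" suite name, these cases instantiate the same int4
// crosswise layouts as above, with a crosswise extent of 64 elements and an overall
// K of 128.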
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x128_16x16x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<16, 16, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x128_32x16x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<32, 16, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x128_32x32x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<32, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x128_64x32x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<64, 32, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_interleaved, 128x128x128_64x64x128_16x8x64) {
using Shape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>;
using Element = cutlass::int4b_t;
using ElementC = int;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<Element>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 128> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
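// The "canonical" tests pass plain RowMajor/ColumnMajor operand layouts (no
// shared-memory swizzle) and omit the explicit operator argument, relying on
// DefaultMmaTensorOp's default multiply-add operator.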
TEST(SM80_warp_gemm_tensor_op_canonical_f64_row_col, 32x32x8_32x32x4_8x8x4) {
using Shape = cutlass::gemm::GemmShape<32, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 32, 8> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_canonical_f64_col_row, 32x32x8_32x32x4_8x8x4) {
using Shape = cutlass::gemm::GemmShape<32, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = double;
using ElementC = double;
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 32, 8> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_canonical_tf32_row_col, 32x32x16_32x32x8_16x8x8) {
using Shape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 32, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_tensor_op_canonical_tf32_col_row, 32x32x16_32x32x8_16x8x8) {
using Shape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::tfloat32_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor>::Type;
test::gemm::warp::Testbed<MmaTensorOp,
cutlass::gemm::GemmShape<32, 32, 16> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
#endif // if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
| test/unit/gemm/warp/gemm_sm80.cu/0 | {
"file_path": "test/unit/gemm/warp/gemm_sm80.cu",
"repo_id": "test",
"token_count": 32133
} | 54 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
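// Minimal stand-in for the standard <stdint.h>, supplied to NVRTC when compiling the
// unit-test kernels (NVRTC does not provide the host C library headers). Only the
// fixed-width, least/fast, pointer-sized, and maximal integer typedefs are defined.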
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef short int16_t;
typedef unsigned short uint16_t;
typedef int int32_t;
typedef unsigned int uint32_t;
typedef long long int int64_t;
typedef unsigned long long int uint64_t;
#if defined __x86_64__ && !defined __ILP32__
# define __WORDSIZE 64
#else
# define __WORDSIZE 32
#endif
/* Small types. */
/* Signed. */
typedef signed char int_least8_t;
typedef short int int_least16_t;
typedef int int_least32_t;
#if __WORDSIZE == 64
typedef long int int_least64_t;
#else
__extension__
typedef long long int int_least64_t;
#endif
/* Unsigned. */
typedef unsigned char uint_least8_t;
typedef unsigned short int uint_least16_t;
typedef unsigned int uint_least32_t;
#if __WORDSIZE == 64
typedef unsigned long int uint_least64_t;
#else
__extension__
typedef unsigned long long int uint_least64_t;
#endif
/* Fast types. */
/* Signed. */
typedef signed char int_fast8_t;
#if __WORDSIZE == 64
typedef long int int_fast16_t;
typedef long int int_fast32_t;
typedef long int int_fast64_t;
#else
typedef int int_fast16_t;
typedef int int_fast32_t;
__extension__
typedef long long int int_fast64_t;
#endif
/* Unsigned. */
typedef unsigned char uint_fast8_t;
#if __WORDSIZE == 64
typedef unsigned long int uint_fast16_t;
typedef unsigned long int uint_fast32_t;
typedef unsigned long int uint_fast64_t;
#else
typedef unsigned int uint_fast16_t;
typedef unsigned int uint_fast32_t;
__extension__
typedef unsigned long long int uint_fast64_t;
#endif
/* Types for `void *' pointers. */
#if __WORDSIZE == 64
# ifndef __intptr_t_defined
typedef long int intptr_t;
# define __intptr_t_defined
# endif
typedef unsigned long int uintptr_t;
#else
# ifndef __intptr_t_defined
typedef int intptr_t;
# define __intptr_t_defined
# endif
typedef unsigned int uintptr_t;
#endif
/* Largest integral types. */
#if __WORDSIZE == 64
typedef long int intmax_t;
typedef unsigned long int uintmax_t;
#else
__extension__
typedef long long int intmax_t;
__extension__
typedef unsigned long long int uintmax_t;
#endif
| test/unit/nvrtc/stdlib/stdint.h/0 | {
"file_path": "test/unit/nvrtc/stdlib/stdint.h",
"repo_id": "test",
"token_count": 1610
} | 55 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for TensorReduce family of device-wide operators
*/
#include <iostream>
#include <limits>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "cutlass/reduction/device/tensor_reduce.h"
#include "cutlass/functional.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/tensor_view_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This reduces the C dimension, transforming an NHWC tensor into NHWC with C=1.
template <typename TensorReduction, typename ElementCompute = typename TensorReduction::ElementCompute>
bool TestAllReduction_NHWC_reduce_c(ElementCompute reduction_identity = ElementCompute()) {
using Layout = typename TensorReduction::Layout;
using ElementOutput = typename TensorReduction::ElementOutput;
using ElementSource = typename TensorReduction::ElementSource;
int const kV = TensorReduction::kVectorLength;
int const N_indices[] = {3, 13};
int const H_indices[] = {5, 17};
int const W_indices[] = {7, 19};
int const C_indices[] = {2049, 2048, 2047, 384, 64, 48, 32, 24, 16, 12, 8, 6, 4, 3, 2, 1};
for (int N : N_indices) {
for (int H : H_indices) {
for (int W : W_indices) {
for (int Cx : C_indices) {
int C = Cx * kV;
cutlass::HostTensor<ElementSource, Layout> src_tensor({N, H, W, C});
cutlass::HostTensor<ElementOutput, Layout> dst_tensor({N, H, W, 1});
cutlass::reference::host::TensorFillRandomUniform(
src_tensor.host_view(), 17, 10, -10, 0);
dst_tensor.sync_device();
src_tensor.sync_device();
// Execute a tensor reduction over rank 3 (the 'C' dimension is reduced; NHWC => NHW)
TensorReduction reduction(src_tensor.extent(), 3);
cutlass::DeviceAllocation<uint8_t> device_workspace(reduction.workspace_size());
cutlass::Status status = reduction.reduce(
dst_tensor.device_ref(),
src_tensor.device_ref(),
device_workspace.get(),
reduction_identity
);
EXPECT_EQ(status, cutlass::Status::kSuccess);
EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess);
dst_tensor.sync_host();
typename TensorReduction::ReductionOp reduction_op;
//
// Reference check
//
for (int n = 0; n < src_tensor.extent().n(); ++n) {
for (int h = 0; h < src_tensor.extent().h(); ++h) {
for (int w = 0; w < src_tensor.extent().w(); ++w) {
ElementCompute c_accum = reduction_identity;
for (int c = 0; c < src_tensor.extent().c(); ++c) {
c_accum = reduction_op(c_accum, ElementCompute(src_tensor.at({n, h, w, c})));
}
ElementCompute got = ElementCompute(dst_tensor.at({n, h, w, 0}));
bool equal = (c_accum == got);
EXPECT_TRUE(equal);
if (!equal) {
std::cerr
<< "Error at location (" << n << ", " << h << ", " << w << ", 0)" << std::endl;
std::cerr
<< " expected: " << c_accum << std::endl
<< " got: " << got << std::endl;
std::cerr
<< "Problem: " << src_tensor.extent() << " -> "
<< dst_tensor.extent() << std::endl;
std::cerr
<< " Grid: " << reduction.reduction_strided.grid_shape
<< "\n Block: " << reduction.reduction_strided.threadblock_shape << std::endl
<< " FInal: " << reduction.reduction_strided.grid_final
<< "\n Block: " << reduction.reduction_strided.threadblock_final << "\n";
return false;
}
} // w
} // h
} // n
//
// Next problem
//
} // C
} // W
} // H
} // N
return true;
}
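// Each TEST below instantiates cutlass::reduction::device::TensorReduction with a
// particular output/source element type, reduction functor, vector length kV, and
// compute type, then runs the TestAllReduction_NHWC_reduce_c<> sweep above. Functors
// without an additive identity pass an explicit reduction identity argument.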
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x1) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x1_f16x1) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x2) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 2;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x2_f16x2) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
int const kV = 2;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x4_f16x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
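// For maximum/minimum reductions the default identity (zero) would bias the result, so
// the tests below seed the reduction with -max()/+max() of float, respectively.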
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_maximum_c_f32x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::maximum<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( -std::numeric_limits<float>::max() ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_minimum_c_f32x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::minimum<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( std::numeric_limits<float>::max() ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ANY_c_s32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = int;
using ElementSource = int;
using ElementCompute = int;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_or<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(0) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ALL_c_s32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = int;
using ElementSource = int;
using ElementCompute = int;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_and<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(1) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ANY_c_f32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_or<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(0) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ALL_c_f32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_and<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(1) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/reduction/device/tensor_reduce_contiguous.cu/0 | {
"file_path": "test/unit/reduction/device/tensor_reduce_contiguous.cu",
"repo_id": "test",
"token_count": 5294
} | 56 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/core_io.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace gemm {
namespace threadblock {
///
template <typename Iterator>
__global__ void kernel_gemm_threadblock_tensor_op_multiplicand_store(
typename Iterator::TensorRef ref_output,
typename Iterator::Element *input) {
// Construct fragment
typename Iterator::Fragment frag;
frag.clear();
// each thread loads a fragment
using AccessType = cutlass::Array<typename Iterator::Element, Iterator::ThreadMap::kElementsPerAccess>;
int const kElementsPerAccess = Iterator::ThreadMap::kElementsPerAccess;
int stride = Iterator::Shape::kContiguous;
int warp_id = (threadIdx.x / 32);
int lane_id = (threadIdx.x % 32);
input += (lane_id % 8) * kElementsPerAccess + (lane_id / 8) * stride;
input += (warp_id * Iterator::Shape::kStrided / Iterator::ThreadMap::Detail::kWarpCount) * stride;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Iterator::ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Iterator::ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < Iterator::ThreadMap::kElementsPerAccess; ++v) {
frag[v + Iterator::ThreadMap::kElementsPerAccess * (c + s * Iterator::ThreadMap::Iterations::kContiguous)] =
input[v + c * 64 + s * Iterator::ThreadMap::Delta::kStrided * stride];
}
}
}
// Use iterator to store results
Iterator iter(ref_output, threadIdx.x);
iter.store(frag);
}
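// Summary of the kernel above: each thread derives its own source offsets to mirror the
// PitchLinearWarpRakedThreadMap arrangement (8 threads x kElementsPerAccess along the
// contiguous dimension, 4 threads along the strided dimension per warp), gathers a
// fragment directly from linear memory, and then writes it out through the
// RegularTileIterator, which applies the TensorOpMultiplicandCongruous swizzle to the
// destination tensor.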
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Simple test environment
template <
typename Shape_,
int WarpCount
>
class MultiplicandTileIteratorTestbed {
public:
//
// Define iterator
//
using Shape = Shape_;
using Element = cutlass::half_t;
using Layout = cutlass::layout::TensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
static int const kAdvanceRank = 1;
static int const kThreads = 32 * WarpCount;
using ThreadMap = cutlass::transform::PitchLinearWarpRakedThreadMap<
Shape,
kThreads,
cutlass::layout::PitchLinearShape<8, 4>,
128 / cutlass::sizeof_bits<Element>::value
>;
using Iterator = cutlass::transform::threadblock::RegularTileIterator<
Shape, Element, Layout, kAdvanceRank, ThreadMap
>;
public:
//
// Members
//
cutlass::HostTensor<Element, Layout> destination_tensor;
cutlass::HostTensor<Element, cutlass::layout::PitchLinear> source_tensor;
public:
MultiplicandTileIteratorTestbed():
destination_tensor({Shape::kContiguous, Shape::kStrided}),
source_tensor({Shape::kContiguous, Shape::kStrided}) {
}
bool run() {
cutlass::reference::host::BlockFillSequential(
source_tensor.host_data(),
source_tensor.capacity()
);
cutlass::reference::host::BlockFillSequential(
destination_tensor.host_data(),
destination_tensor.capacity(),
Element(0),
Element(0)
);
//
// Launch kernel
//
dim3 grid(1,1);
dim3 block(kThreads, 1);
destination_tensor.sync_device();
source_tensor.sync_device();
test::gemm::threadblock::kernel_gemm_threadblock_tensor_op_multiplicand_store<Iterator><<<
grid, block
>>>(
destination_tensor.device_ref(),
source_tensor.device_data()
);
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << " - CUDA ERROR: " << cudaGetErrorString(result);
destination_tensor.sync_host();
//
// Verify
//
// Verify that the destination tensor matches the source element-by-element
int errors = 0;
for (int s = 0; s < Shape::kStrided; ++s) {
for (int c = 0; c < Shape::kContiguous; ++c) {
if (errors >= 10) {
break;
}
Element expected = source_tensor.at({c, s});
Element got = destination_tensor.at({c, s});
bool passed = (expected == got);
if (!passed) {
++errors;
}
}
}
EXPECT_EQ(errors, 0)
<< source_tensor.host_view() << "\n\n" << destination_tensor.host_view() << std::endl;
return !errors;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 64x8_w1) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<64, 8>, 1>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 64x16_w1) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<64, 16>, 1>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 64x16_w2) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<64, 16>, 2>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 128x8_w1) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<128, 8>, 1>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 64x32_w4) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<64, 32>, 4>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 128x32_w1) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<128, 32>, 1>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 128x32_w4) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<128, 32>, 4>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 256x32_w4) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<256, 32>, 4>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 256x32_w8) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<256, 32>, 8>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// ---- End of file: test/unit/transform/threadblock/regular_tile_iterator_tensor_op.cu ----
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines operations for all CONV operation kinds in CUTLASS Library.
*/
#pragma once
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/kernel/default_conv2d_group_fprop.h"
#include "cutlass/conv/kernel/default_depthwise_fprop.h"
#include "cutlass/conv/kernel/default_conv2d_dgrad.h"
#include "cutlass/conv/kernel/default_conv2d_wgrad.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/conv/device/direct_convolution.h"
#include "cutlass/library/library.h"
#include "library_internal.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/core_io.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class Conv2dOperationBase : public Operation {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = Operator::kIteratorAlgorithm;
static cutlass::conv::Operator const kConvolutionalOperator = Operator::kConvolutionalOperator;
using OperatorArguments = typename Operator::Arguments;
protected:
///
ConvDescription description_;
public:
/// Constructor
Conv2dOperationBase(char const *name = "unknown_conv2d") {
description_.name = name;
description_.provider = Provider::kCUTLASS;
description_.kind = OperationKind::kConv2d;
description_.conv_dim = Operator::kConvDim;
description_.iterator_algorithm = IteratorAlgorithmMap<Operator::kIteratorAlgorithm>::kId;
description_.tile_description.threadblock_shape = make_Coord(
Operator::ThreadblockShape::kM,
Operator::ThreadblockShape::kN,
Operator::ThreadblockShape::kK);
description_.tile_description.threadblock_stages = Operator::kStages;
description_.tile_description.warp_count = make_Coord(
Operator::UnderlyingKernel::WarpCount::kM,
Operator::UnderlyingKernel::WarpCount::kN,
Operator::UnderlyingKernel::WarpCount::kK);
description_.tile_description.math_instruction.instruction_shape = make_Coord(
Operator::InstructionShape::kM,
Operator::InstructionShape::kN,
Operator::InstructionShape::kK);
description_.tile_description.math_instruction.element_accumulator =
NumericTypeMap<ElementAccumulator>::kId;
description_.tile_description.math_instruction.opcode_class =
OpcodeClassMap<typename Operator::OperatorClass>::kId;
description_.tile_description.math_instruction.math_operation =
MathOperationMap<typename Operator::MathOperator>::kId;
description_.tile_description.minimum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMin;
description_.tile_description.maximum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMax;
description_.A = make_TensorDescription<ElementA, LayoutA>();
description_.B = make_TensorDescription<ElementB, LayoutB>();
description_.C = make_TensorDescription<ElementC, LayoutC>();
description_.element_epilogue = NumericTypeMap<ElementCompute>::kId;
// TODO: Add split k mode Serial and parallel to convolutions
// description_.split_k_mode = Operator::kSplitK ? SplitKMode::kSerial : SplitKMode::kNone;
}
  /// Returns the description of the conv2d operation
virtual OperationDescription const & description() const {
return description_;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Conv2d library operation class for cutlass profiler
//
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class Conv2dOperation : public Conv2dOperationBase<Operator_> {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
static cutlass::conv::Operator const kConvolutionalOperator = Operator::kConvolutionalOperator;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
Conv2dOperation(char const *name = "unknown_conv2d_fprop") : Conv2dOperationBase<Operator_>(name) {
this->description_.conv_kind = ConvKindMap<kConvolutionalOperator>::kId;
}
protected:
/// Constructs the arguments structure given the configuration and arguments
static Status construct_arguments_(
OperatorArguments &operator_args,
Conv2dConfiguration const *configuration) {
operator_args.problem_size = configuration->problem_size;
operator_args.ref_A =
{
nullptr,
LayoutA::packed(implicit_gemm_tensor_a_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.ref_B =
{
nullptr,
LayoutB::packed(implicit_gemm_tensor_b_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.ref_C =
{
nullptr,
LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.ref_D =
{
nullptr,
LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.split_k_mode = configuration->split_k_mode;
return Status::kSuccess;
}
  /// Updates the arguments structure with the pointers and scalars provided at run time
static Status update_arguments_(
OperatorArguments &operator_args,
ConvArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<ElementCompute const *>(arguments->alpha),
*static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.output_op = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::EpilogueOutputOp::Params params(
static_cast<ElementCompute const *>(arguments->alpha),
static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.output_op = params;
}
else {
return Status::kErrorInvalidProblem;
}
operator_args.ref_A.reset(static_cast<ElementA *>(const_cast<void *>(arguments->A)));
operator_args.ref_B.reset(static_cast<ElementB *>(const_cast<void *>(arguments->B)));
operator_args.ref_C.reset(static_cast<ElementC *>(const_cast<void *>(arguments->C)));
operator_args.ref_D.reset(static_cast<ElementC *>(const_cast<void *>(arguments->D)));
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
Conv2dConfiguration const *configuration =
static_cast<Conv2dConfiguration const *>(configuration_ptr);
ConvArguments const *arguments =
static_cast<ConvArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<Conv2dConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
return Operator::get_workspace_size(args);
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<Conv2dConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
//std::cout << "initialize library::Conv2dOperation" << std::endl;
//print_operator_args(args);
return op->initialize(args, device_workspace, stream);
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<ConvArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args, device_workspace);
if (status != Status::kSuccess) {
return status;
}
//std::cout << "run library::Conv2dOperation" << std::endl;
//print_operator_args(args);
return op->run(stream);
}
  /// Helper that may be called from Conv2dOperation::initialize() to dump the arguments
  /// passed to the underlying CUTLASS operator for debugging
void print_operator_args(OperatorArguments &operator_args) const {
std::cout << "Conv2dOperation::OperatorArguments" << std::endl
<< " problem_size:" << std::endl
<< operator_args.problem_size << std::endl
<< " split_k_mode: "
<< (operator_args.split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial" : "parallel") << std::endl
<< " epilogue (alpha, beta): "
<< operator_args.output_op.alpha << ", "
<< operator_args.output_op.beta << std::endl
<< " ref_A (ptr, {stride}): "
<< operator_args.ref_A.data() << ", {"
<< operator_args.ref_A.stride(0) << ", "
<< operator_args.ref_A.stride(1) << ", "
<< operator_args.ref_A.stride(2) << "}" << std::endl
<< " ref_B (ptr, {stride}): "
<< operator_args.ref_B.data() << ", {"
<< operator_args.ref_B.stride(0) << ", "
<< operator_args.ref_B.stride(1) << ", "
<< operator_args.ref_B.stride(2) << "}" << std::endl
<< " ref_C (ptr, {stride}): "
<< operator_args.ref_C.data() << ", {"
<< operator_args.ref_C.stride(0) << ", "
<< operator_args.ref_C.stride(1) << ", "
<< operator_args.ref_C.stride(2) << "}" << std::endl
<< " ref_D (ptr, {stride}): "
<< operator_args.ref_D.data() << ", {"
<< operator_args.ref_D.stride(0) << ", "
<< operator_args.ref_D.stride(1) << ", "
<< operator_args.ref_D.stride(2) << "}" << std::endl;
}
};
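///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Illustrative sketch (not part of the library API): the host-side sequence a client such as the
// CUTLASS profiler follows when driving the Operation interface implemented above. The function
// name is hypothetical, error handling is abbreviated, and the configuration/argument structures
// are assumed to be populated by the caller; std::vector and the CUDA runtime are assumed to be
// reachable through the headers included above.
//
inline Status example_run_conv2d_operation(
  Operation const *op,
  Conv2dConfiguration const &config,
  ConvArguments const &conv_args,
  cudaStream_t stream = nullptr) {

  // Verify the operation supports the requested problem
  Status status = op->can_implement(&config, &conv_args);
  if (status != Status::kSuccess) {
    return status;
  }

  // Host workspace holds the constructed Operator object (see get_host_workspace_size())
  std::vector<uint8_t> host_workspace(op->get_host_workspace_size(&config));

  // Device workspace, if the operator requires one (e.g. split-K partial reductions)
  void *device_workspace = nullptr;
  uint64_t device_workspace_size = op->get_device_workspace_size(&config, &conv_args);
  if (device_workspace_size) {
    cudaMalloc(&device_workspace, device_workspace_size);
  }

  status = op->initialize(&config, host_workspace.data(), device_workspace, stream);
  if (status == Status::kSuccess) {
    status = op->run(&conv_args, host_workspace.data(), device_workspace, stream);
  }

  if (device_workspace) {
    cudaFree(device_workspace);
  }
  return status;
}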
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// DirectConv2d library operation class for cutlass profiler
//
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class DirectConv2dOperation : public Conv2dOperation<Operator_> {
public:
using Operator = Operator_;
using Base = Conv2dOperation<Operator_>;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
static cutlass::conv::Operator const kConvolutionalOperator = Operator::kConvolutionalOperator;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
  DirectConv2dOperation(char const *name = "unknown_direct_conv2d_fprop") : Conv2dOperation<Operator_>(name) {
this->description_.conv_kind = ConvKindMap<kConvolutionalOperator>::kId;
}
protected:
/// Constructs the arguments structure given the configuration and arguments
static Status construct_arguments_(
OperatorArguments &operator_args,
Conv2dConfiguration const *configuration) {
operator_args.problem_size = configuration->problem_size;
operator_args.ref_A =
{
nullptr,
LayoutA::packed(implicit_gemm_tensor_a_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.ref_B =
{
nullptr,
LayoutB::packed(implicit_gemm_tensor_b_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.ref_reordered_B =
{
nullptr,
LayoutB::packed(implicit_gemm_tensor_b_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.ref_C =
{
nullptr,
LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.ref_D =
{
nullptr,
LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.split_k_mode = configuration->split_k_mode;
return Status::kSuccess;
}
  /// Updates the arguments structure with the pointers and scalars provided at run time
static Status update_arguments_(
OperatorArguments &operator_args,
ConvArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<ElementCompute const *>(arguments->alpha),
*static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.output_op = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::EpilogueOutputOp::Params params(
static_cast<ElementCompute const *>(arguments->alpha),
static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.output_op = params;
}
else {
return Status::kErrorInvalidProblem;
}
operator_args.ref_A.reset(static_cast<ElementA *>(const_cast<void *>(arguments->A)));
operator_args.ref_B.reset(static_cast<ElementB *>(const_cast<void *>(arguments->B)));
operator_args.ref_C.reset(static_cast<ElementC *>(const_cast<void *>(arguments->C)));
operator_args.ref_D.reset(static_cast<ElementC *>(const_cast<void *>(arguments->D)));
    operator_args.ref_reordered_B.reset(static_cast<ElementB *>(const_cast<void *>(arguments->reordered_B)));
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
Conv2dConfiguration const *configuration =
static_cast<Conv2dConfiguration const *>(configuration_ptr);
ConvArguments const *arguments =
static_cast<ConvArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<Conv2dConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
return Operator::get_workspace_size(args);
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<Conv2dConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
//std::cout << "initialize library::Conv2dOperation" << std::endl;
//print_operator_args(args);
return op->initialize(args, device_workspace, stream);
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<ConvArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args, device_workspace);
if (status != Status::kSuccess) {
return status;
}
//std::cout << "run library::Conv2dOperation" << std::endl;
//print_operator_args(args);
return op->run(stream);
}
  /// Helper that may be called from DirectConv2dOperation::initialize() to dump the arguments
  /// passed to the underlying CUTLASS operator for debugging
void print_operator_args(OperatorArguments &operator_args) const {
    std::cout << "DirectConv2dOperation::OperatorArguments" << std::endl
<< " problem_size:" << std::endl
<< operator_args.problem_size << std::endl
<< " split_k_mode: "
<< (operator_args.split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial" : "parallel") << std::endl
<< " epilogue (alpha, beta): "
<< operator_args.output_op.alpha << ", "
<< operator_args.output_op.beta << std::endl
<< " ref_A (ptr, {stride}): "
<< operator_args.ref_A.data() << ", {"
<< operator_args.ref_A.stride(0) << ", "
<< operator_args.ref_A.stride(1) << ", "
<< operator_args.ref_A.stride(2) << "}" << std::endl
<< " ref_B (ptr, {stride}): "
<< operator_args.ref_B.data() << ", {"
<< operator_args.ref_B.stride(0) << ", "
<< operator_args.ref_B.stride(1) << ", "
<< operator_args.ref_B.stride(2) << "}" << std::endl
<< " ref_C (ptr, {stride}): "
<< operator_args.ref_C.data() << ", {"
<< operator_args.ref_C.stride(0) << ", "
<< operator_args.ref_C.stride(1) << ", "
<< operator_args.ref_C.stride(2) << "}" << std::endl
<< " ref_D (ptr, {stride}): "
<< operator_args.ref_D.data() << ", {"
<< operator_args.ref_D.stride(0) << ", "
<< operator_args.ref_D.stride(1) << ", "
<< operator_args.ref_D.stride(2) << "}" << std::endl;
}
};
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
// ---- End of file: tools/library/src/conv2d_operation.h ----
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines operations for all CONV operation kinds in CUTLASS Library
*/
#pragma once
#include <iostream>
#include <sstream>
#include <cstring>
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "cutlass/library/util.h"
#include "library_internal.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/device/convolution.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
Provider kProvider,
cutlass::conv::Operator ConvolutionalOperator,
int ConvDim,
typename ElementA_,
typename LayoutA_,
typename ElementB_,
typename LayoutB_,
typename ElementC_,
typename LayoutC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ConvertOp_ = NumericConverter<ElementC_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
struct ConvReferenceDispatcher;
/// Dispatcher for Conv2d (partially specialized for kConvDim == 2)
template <
Provider kProvider,
cutlass::conv::Operator kConvolutionalOperator,
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator,
typename ConvertOp,
typename InnerProductOp
>
struct ConvReferenceDispatcher<
kProvider,
kConvolutionalOperator,
2,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp> {
static Status dispatch(
void const *configuration,
ElementA *ptr_A,
ElementB *ptr_B,
ElementC *ptr_C,
ElementC *ptr_D,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr
) {
Conv2dConfiguration const &config =
*static_cast<Conv2dConfiguration const *>(configuration);
    // TODO: generalize this code; it is currently hard-coded for the NHWC layout.
layout::TensorNHWC layout_a;
layout::TensorNHWC layout_b;
layout::TensorNHWC layout_c;
layout_a.stride() =
make_Coord(int32_t(config.stride_a[0]),
int32_t(config.stride_a[1]),
int32_t(config.stride_a[2]));
layout_b.stride() =
make_Coord(int32_t(config.stride_b[0]),
int32_t(config.stride_b[1]),
int32_t(config.stride_b[2]));
layout_c.stride() =
make_Coord(int32_t(config.stride_c[0]),
int32_t(config.stride_c[1]),
int32_t(config.stride_c[2]));
if (kProvider == Provider::kReferenceHost) {
cutlass::reference::host::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
      ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ElementC,
ConvertOp,
InnerProductOp
>(
kConvolutionalOperator,
config.problem_size,
{ptr_A, layout_a},
{ptr_B, layout_b},
{ptr_C, layout_c},
{ptr_D, layout_c},
alpha,
beta
);
return Status::kSuccess;
}
else if (kProvider == Provider::kReferenceDevice) {
return cutlass::reference::device::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp
>(
kConvolutionalOperator,
config.problem_size,
{ptr_A, layout_a},
{ptr_B, layout_b},
{ptr_C, layout_c},
{ptr_D, layout_c},
alpha,
beta,
stream
);
}
return Status::kErrorNotSupported;
}
};
/// Dispatcher for Conv3d (partially specialized for kConvDim == 3)
template <
Provider kProvider,
cutlass::conv::Operator kConvolutionalOperator,
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator,
typename ConvertOp,
typename InnerProductOp
>
struct ConvReferenceDispatcher<
kProvider,
kConvolutionalOperator,
3,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp> {
static Status dispatch(
void const *configuration,
ElementA *ptr_A,
ElementB *ptr_B,
ElementC *ptr_C,
ElementC *ptr_D,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr
) {
Conv3dConfiguration const &config =
*static_cast<Conv3dConfiguration const *>(configuration);
ConvKind const conv_kind = ConvKindMap<kConvolutionalOperator>::kId;
if (kProvider == Provider::kReferenceHost) {
cutlass::reference::host::Conv3d<
ElementA,
LayoutA,
ElementB,
LayoutB,
      ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp
>(
kConvolutionalOperator,
config.problem_size,
{ptr_A, config.layout_a(conv_kind)},
{ptr_B, config.layout_b(conv_kind)},
{ptr_C, config.layout_c(conv_kind)},
{ptr_D, config.layout_c(conv_kind)},
alpha,
beta
);
return Status::kSuccess;
}
else if (kProvider == Provider::kReferenceDevice) {
return cutlass::reference::device::Conv3d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp
>(
kConvolutionalOperator,
config.problem_size,
{ptr_A, config.layout_a(conv_kind)},
{ptr_B, config.layout_b(conv_kind)},
{ptr_C, config.layout_c(conv_kind)},
{ptr_D, config.layout_c(conv_kind)},
alpha,
beta,
stream
);
}
return Status::kErrorNotSupported;
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
template <
Provider Provider_,
cutlass::conv::Operator ConvolutionalOperator,
int ConvDim,
typename ElementA_,
typename LayoutA_,
typename ElementB_,
typename LayoutB_,
typename ElementC_,
typename LayoutC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ConvertOp_ = NumericConverter<ElementC_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
class ConvReferenceOperation : public Operation {
public:
static Provider const kProvider = Provider_;
static cutlass::conv::Operator const kConvolutionalOperator = ConvolutionalOperator;
static int const kConvDim = ConvDim;
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using ElementCompute = ElementCompute_;
using ElementAccumulator = ElementAccumulator_;
using ConvertOp = ConvertOp_;
using InnerProductOp = InnerProductOp_;
protected:
/// Storage for the name string
std::string name_;
///
ConvDescription description_;
public:
/// Constructor
ConvReferenceOperation() {
// Basic information
description_.provider = kProvider;
description_.kind = (kConvDim == 2 ? OperationKind::kConv2d : OperationKind::kConv3d);
description_.conv_kind = ConvKindMap<kConvolutionalOperator>::kId;
description_.conv_dim = kConvDim;
// Tensor description
description_.A = make_TensorDescription<ElementA, LayoutA>();
description_.B = make_TensorDescription<ElementB, LayoutB>();
description_.C = make_TensorDescription<ElementC, LayoutC>();
// Epilogue compute and accumulator type description
description_.element_epilogue = NumericTypeMap<ElementCompute>::kId;
description_.tile_description.math_instruction.element_accumulator =
NumericTypeMap<ElementAccumulator>::kId;
// Iterator algorithm for convolution reference
description_.iterator_algorithm = IteratorAlgorithmID::kNone;
// Compute capability for convolution reference
description_.tile_description.minimum_compute_capability =
(kProvider == Provider::kReferenceDevice ? 50 : 0);
description_.tile_description.maximum_compute_capability = 1024;
// Procedural name
std::stringstream ss;
ss << "conv" << kConvDim << "d_" << to_string(description_.conv_kind)
<< "_reference_" << to_string(description_.provider)
<< "_" << to_string(description_.A.element) << to_string(description_.A.layout)
<< "_" << to_string(description_.B.element) << to_string(description_.B.layout)
<< "_" << to_string(description_.C.element) << to_string(description_.C.layout)
<< "_" << to_string(description_.tile_description.math_instruction.element_accumulator);
name_ = ss.str();
description_.name = name_.c_str();
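    // For example, assuming the usual to_string() mappings, a 2-D fprop host reference operating
    // on fp32 NHWC tensors would be named "conv2d_fprop_reference_host_f32nhwc_f32nhwc_f32nhwc_f32".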
}
  /// Returns the description of the conv reference operation
virtual OperationDescription const & description() const {
return description_;
}
virtual Status can_implement(
void const *configuration,
void const *arguments) const {
return Status::kSuccess;
}
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
switch (kConvDim) {
case 2:
return sizeof(Conv2dConfiguration);
case 3:
return sizeof(Conv3dConfiguration);
default:
break;
}
return 0;
}
virtual uint64_t get_device_workspace_size(
void const *configuration,
void const *arguments = nullptr) const {
return 0;
}
virtual Status initialize(
void const *configuration,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
std::memcpy(host_workspace, configuration, get_host_workspace_size(configuration));
return Status::kSuccess;
}
virtual Status run(
void const *arguments,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
ConvArguments const &args = *static_cast<ConvArguments const *>(arguments);
ElementCompute alpha;
ElementCompute beta;
alpha = *static_cast<ElementCompute const *>(args.alpha);
beta = *static_cast<ElementCompute const *>(args.beta);
// TODO - respect pointer mode
// Invoke 2D or 3D convolution
return detail::ConvReferenceDispatcher<
kProvider,
kConvolutionalOperator,
kConvDim,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp
>::dispatch(
host_workspace,
static_cast<ElementA *>(const_cast<void *>(args.A)),
static_cast<ElementB *>(const_cast<void *>(args.B)),
static_cast<ElementC *>(const_cast<void *>(args.C)),
static_cast<ElementC *>(args.D),
alpha,
beta,
stream
);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Constructs Fprop reference operators.
template <
int kConvDim,
typename ElementA_,
typename LayoutA_,
typename ElementB_,
typename LayoutB_,
typename ElementC_,
typename LayoutC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ConvertOp_ = NumericConverter<ElementC_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
void make_conv_fprop(Manifest &manifest) {
#if !defined(CUTLASS_PROFILER_DISABLE_REFERENCE)
manifest.append(new ConvReferenceOperation<
Provider::kReferenceHost,
cutlass::conv::Operator::kFprop,
kConvDim,
ElementA_, LayoutA_,
ElementB_, LayoutB_,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ConvertOp_,
InnerProductOp_
>);
manifest.append(new ConvReferenceOperation<
Provider::kReferenceDevice,
cutlass::conv::Operator::kFprop,
kConvDim,
ElementA_, LayoutA_,
ElementB_, LayoutB_,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ConvertOp_,
InnerProductOp_
>);
#endif // !defined(CUTLASS_PROFILER_DISABLE_REFERENCE)
}
/// Constructs Dgrad and Wgrad reference operators.
template <
int kConvDim,
typename ElementA_,
typename LayoutA_,
typename ElementB_,
typename LayoutB_,
typename ElementC_,
typename LayoutC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ConvertOp_ = NumericConverter<ElementC_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
void make_conv_backwards(Manifest &manifest) {
#if !defined(CUTLASS_PROFILER_DISABLE_REFERENCE)
manifest.append(new ConvReferenceOperation<
Provider::kReferenceHost,
cutlass::conv::Operator::kDgrad,
kConvDim,
ElementA_, LayoutA_,
ElementB_, LayoutB_,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ConvertOp_,
InnerProductOp_
>);
manifest.append(new ConvReferenceOperation<
Provider::kReferenceDevice,
cutlass::conv::Operator::kDgrad,
kConvDim,
ElementA_, LayoutA_,
ElementB_, LayoutB_,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ConvertOp_,
InnerProductOp_
>);
manifest.append(new ConvReferenceOperation<
Provider::kReferenceHost,
cutlass::conv::Operator::kWgrad,
kConvDim,
ElementA_, LayoutA_,
ElementB_, LayoutB_,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ConvertOp_,
InnerProductOp_
>);
manifest.append(new ConvReferenceOperation<
Provider::kReferenceDevice,
cutlass::conv::Operator::kWgrad,
kConvDim,
ElementA_, LayoutA_,
ElementB_, LayoutB_,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ConvertOp_,
InnerProductOp_
>);
#endif // !defined(CUTLASS_PROFILER_DISABLE_REFERENCE)
}
/// Six operators for the price of one.
template <
int kConvDim,
typename ElementA_,
typename LayoutA_,
typename ElementB_,
typename LayoutB_,
typename ElementC_,
typename LayoutC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ConvertOp_ = NumericConverter<ElementC_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
void make_conv_all(Manifest &manifest) {
make_conv_fprop<
kConvDim,
ElementA_, LayoutA_,
ElementB_, LayoutB_,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_conv_backwards<
kConvDim,
ElementA_, LayoutA_,
ElementB_, LayoutB_,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ConvertOp_,
InnerProductOp_
>(manifest);
}
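///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Illustrative sketch (not part of this header): how the factory above might be instantiated when
// registering reference operators with a manifest. The actual registrations live in the reference
// .cu translation units; the function name and type selection below are assumptions.
//
inline void example_register_conv2d_fp32_nhwc_reference(Manifest &manifest) {
  make_conv_all<
    2,                                      // kConvDim
    float, cutlass::layout::TensorNHWC,     // A
    float, cutlass::layout::TensorNHWC,     // B
    float, cutlass::layout::TensorNHWC,     // C
    float,                                  // ElementCompute
    float                                   // ElementAccumulator
  >(manifest);
}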
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
// ---- End of file: tools/library/src/reference/conv_reference_operation.h ----
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
// Profiler includes
#include "cutlass/profiler/cutlass_profiler.h"
#include "cutlass/profiler/gemm_operation_profiler.h"
#include "cutlass/profiler/rank_k_operation_profiler.h"
#include "cutlass/profiler/rank_2k_operation_profiler.h"
#include "cutlass/profiler/trmm_operation_profiler.h"
#include "cutlass/profiler/symm_operation_profiler.h"
#include "cutlass/profiler/conv2d_operation_profiler.h"
#include "cutlass/profiler/conv3d_operation_profiler.h"
#include "cutlass/profiler/sparse_gemm_operation_profiler.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
CutlassProfiler::CutlassProfiler(
Options const &options
):
options_(options) {
operation_profilers_.emplace_back(new GemmOperationProfiler(options));
operation_profilers_.emplace_back(new SparseGemmOperationProfiler(options));
operation_profilers_.emplace_back(new Conv2dOperationProfiler(options));
operation_profilers_.emplace_back(new Conv3dOperationProfiler(options));
operation_profilers_.emplace_back(new RankKOperationProfiler(options));
operation_profilers_.emplace_back(new Rank2KOperationProfiler(options));
operation_profilers_.emplace_back(new TrmmOperationProfiler(options));
operation_profilers_.emplace_back(new SymmOperationProfiler(options));
}
CutlassProfiler::~CutlassProfiler() {
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Execute the program
int CutlassProfiler::operator()() {
if (options_.cmdline.num_naked_args() > 0) {
std::cerr << "Unknown args: \n";
options_.cmdline.print_naked_args(std::cerr);
std::cerr << "\n\n\n";
print_usage_(std::cout);
return 1;
}
if (options_.about.help) {
if (options_.operation_kind == library::OperationKind::kInvalid) {
print_usage_(std::cout);
}
else {
for (auto & profiler : operation_profilers_) {
if (profiler->kind() == options_.operation_kind) {
profiler->print_usage(std::cout);
profiler->print_examples(std::cout);
return 0;
}
}
}
return 0;
}
else if (options_.about.version) {
options_.about.print_version(std::cout);
std::cout << std::endl;
return 0;
}
else if (options_.about.device_info) {
options_.device.print_device_info(std::cout);
return 0;
}
if (options_.execution_mode == ExecutionMode::kProfile ||
options_.execution_mode == ExecutionMode::kDryRun ||
options_.execution_mode == ExecutionMode::kTrace) {
// Profiles all operations
return profile_();
}
else if (options_.execution_mode == ExecutionMode::kEnumerate) {
// Enumerates all operations
enumerate_();
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Enumerates all operations
void CutlassProfiler::enumerate_() {
}
/// Profiles all operations
int CutlassProfiler::profile_() {
// Keep track of all device memory tensor in map
DeviceContext device_context;
int result = 0;
// For all profilers (e.g. gemm/sparse_gemm/conv2d...)
for (auto & profiler : operation_profilers_) {
if (options_.operation_kind == library::OperationKind::kInvalid ||
options_.operation_kind == profiler->kind()) {
result = profiler->profile_all(options_, library::Singleton::get().manifest, device_context);
// If some profile failed, terminate immediately
if (result) {
return result;
}
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Prints all options
void CutlassProfiler::print_usage_(std::ostream &out) {
options_.print_usage(out);
out << "\nOperations:\n\n";
// For all profilers
for (auto & profiler : operation_profilers_) {
std::string kind_str = library::to_string(profiler->kind());
size_t kAlignment = 40;
size_t columns = 0;
if (kind_str.size() < kAlignment) {
columns = kAlignment - kind_str.size();
}
out << " " << kind_str << std::string(columns, ' ') << profiler->description() << "\n";
}
out << "\n\nFor details about a particular function, specify the function name with --help.\n\nExample:\n\n"
<< " $ cutlass_profiler --operation=Gemm --help\n\n"
<< " $ cutlass_profiler --operation=RankK --help\n\n"
<< " $ cutlass_profiler --operation=Trmm --help\n\n"
<< " $ cutlass_profiler --operation=Symm --help\n\n"
<< " $ cutlass_profiler --operation=Conv3d --help\n\n"
<< " $ cutlass_profiler --operation=Conv2d --help\n\n"
<< " $ cutlass_profiler --operation=SparseGemm --help\n\n"
;
}
/// Prints usage
void CutlassProfiler::print_options_(std::ostream &out) {
options_.print_options(out);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Initializes the CUDA device
void CutlassProfiler::initialize_device_() {
cudaError_t result = cudaSetDevice(options_.device.device);
if (result != cudaSuccess) {
std::cerr << "Failed to set device.";
throw std::runtime_error("Failed to set device");
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
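#if 0
// Illustrative sketch only, kept inside '#if 0' so this translation unit does not define a second
// entry point: the real main() lives in the profiler's main translation unit. This shows the
// assumed wiring from the command line to CutlassProfiler::operator().
int main(int argc, char const *argv[]) {
  cutlass::CommandLine cmdline(argc, argv);
  cutlass::profiler::Options options(cmdline);
  cutlass::profiler::CutlassProfiler profiler(options);
  return profiler();
}
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////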
// ---- End of file: tools/profiler/src/cutlass_profiler.cu ----
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "cutlass/profiler/cublas_helpers.h"
#include "cutlass/profiler/trmm_operation_profiler.h"
#include "cutlass/profiler/gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
TrmmOperationProfiler::TrmmOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kTrmm,
{
{ArgumentTypeID::kEnumerated, {"trmm_kind"}, "Variant of TRMM (universal)"},
{ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the TRMM problem space"},
{ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the TRMM problem space"},
{ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
{ArgumentTypeID::kEnumerated, {"side_mode"}, "Side Mode for TRMM (left, right)"},
{ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for TRMM (lower, upper)"},
{ArgumentTypeID::kEnumerated, {"diag_type"}, "Diag Type for TRMM (nonunit, unit)"},
{ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"},
{ArgumentTypeID::kTensor, {"D"}, "Tensor storing the D operand"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of TRMMs computed in one batch"},
},
{ library::Provider::kCUBLAS}
) {
description_ = " Triangular Matrix-Multiplication. D = alpha * A * B or alpha * B * A";
}
/// Destructor
TrmmOperationProfiler::~TrmmOperationProfiler() {
}
/// Prints usage statement for the math function
void TrmmOperationProfiler::print_usage(std::ostream &out) const {
out << "TRMM" << "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void TrmmOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular problem size:\n"
<< " $ cutlass_profiler --operation=Trmm --n=1024 --m=128\n\n"
<< "Schmoo over problem size and beta:\n"
<< " $ cutlass_profiler --operation=Trmm --n=1024:4096:256 --m=128:8192:128 --beta=0,1,2.5\n\n"
<< "Schmoo over accumulator types:\n"
<< " $ cutlass_profiler --operation=Trmm --accumulator-type=f16,f32\n\n"
    << "Run when A is f16 with column-major, or A is any data type with row-major (for column major, use 'column', 'col', or 'n'; for row major, use 'row' or 't'):\n"
<< " $ cutlass_profiler --operation=Trmm --A=f16:column or --A=*:row\n\n"
<< "Using various input value distribution:\n"
<< " $ cutlass_profiler --operation=Trmm --dist=uniform,min:0,max:3\n"
<< " $ cutlass_profiler --operation=Trmm --dist=gaussian,mean:0,stddev:3\n"
<< " $ cutlass_profiler --operation=Trmm --dist=sequential,start:0,delta:1\n\n"
<< "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
<< " $ cutlass_profiler --operation=Trmm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
<< "Test your changes to trmm kernels with a quick functional test and save results in functional-test.csv:\n"
<< " $ cutlass_profiler --operation=Trmm \\ \n"
<< " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
    << " --m=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
<< " --beta=0,1,2 --profiling-iterations=1 \\ \n"
<< " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
Status TrmmOperationProfiler::TrmmProblem::parse(
library::TrmmDescription const &operation_desc,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!arg_as_int(this->m, "m", problem_space, problem)) {
// default value
this->m = 1024;
}
if (!arg_as_int(this->n, "n", problem_space, problem)) {
// default value
this->n = 1024;
}
if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
// default value
this->split_k_slices = 1;
}
if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
// default value
this->batch_count = 1;
}
if (this->split_k_slices > 1 && this->batch_count > 1) {
// At least one of these must be one
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.D, "D", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(
this->alpha,
operation_desc.element_epilogue,
"alpha",
problem_space,
problem)) {
if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(
this->beta,
operation_desc.element_epilogue,
"beta",
problem_space,
problem)) {
if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
if (operation_desc.side_mode == SideMode::kLeft) {
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->m), int(this->m)}).front();
}
else if (operation_desc.side_mode == SideMode::kRight) {
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->n), int(this->n)}).front();
}
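  // A is m-by-m for the left side mode and n-by-n for the right side mode, so with a packed
  // layout the leading dimension equals the matrix order (e.g. lda == m for a left-side problem).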
this->ldb = DeviceAllocation::get_packed_layout(
operation_desc.B.layout, {int(this->m), int(this->n)}).front();
this->ldd = DeviceAllocation::get_packed_layout(
operation_desc.D.layout, {int(this->m), int(this->n)}).front();
return Status::kSuccess;
}
/// Initializes a performance result
void TrmmOperationProfiler::TrmmProblem::initialize_result(
PerformanceResult &result,
library::TrmmDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.arguments.resize(problem_space.rank());
set_argument(result, "trmm_kind", problem_space, library::to_string(operation_desc.trmm_kind));
set_argument(result, "A", problem_space,
std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
set_argument(result, "side_mode", problem_space, library::to_string(operation_desc.side_mode));
set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode));
set_argument(result, "diag_type", problem_space, library::to_string(operation_desc.diag_type));
set_argument(result, "B", problem_space,
std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout));
set_argument(result, "D", problem_space,
std::string(library::to_string(operation_desc.D.element)) + ":" + library::to_string(operation_desc.D.layout));
set_argument(result, "m", problem_space, m);
set_argument(result, "n", problem_space, n);
set_argument(result, "split_k_slices", problem_space, split_k_slices);
set_argument(result, "batch_count", problem_space, batch_count);
set_argument(result, "alpha", problem_space,
library::lexical_cast(alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(beta, operation_desc.element_epilogue));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status TrmmOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::TrmmDescription const &operation_desc =
static_cast<library::TrmmDescription const &>(operation->description());
if (operation_desc.trmm_kind != library::TrmmKind::kUniversal) {
return Status::kErrorInvalidProblem;
}
Status status = problem_.parse(operation_desc, problem_space, problem);
if (status != Status::kSuccess) {
return status;
}
trmm_workspace_.configuration.problem_size.m() = int(problem_.m);
trmm_workspace_.configuration.problem_size.n() = int(problem_.n);
trmm_workspace_.configuration.problem_size.k() = (operation_desc.side_mode == SideMode::kLeft)
? int(problem_.m) : int(problem_.n);
trmm_workspace_.configuration.lda = problem_.lda;
trmm_workspace_.configuration.ldb = problem_.ldb;
trmm_workspace_.configuration.ldd = problem_.ldd;
//trmm_workspace_.configuration.split_k_slices = int(problem_.split_k_slices);
trmm_workspace_.configuration.batch_count = int(problem_.split_k_slices);
trmm_workspace_.arguments.A = nullptr;
trmm_workspace_.arguments.B = nullptr;
trmm_workspace_.arguments.D = nullptr;
trmm_workspace_.arguments.alpha = problem_.alpha.data();
trmm_workspace_.arguments.beta = problem_.beta.data();
trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&trmm_workspace_.configuration, &trmm_workspace_.arguments);
}
/// Initializes the performance result
void TrmmOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::TrmmDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
problem_.initialize_result(result, operation_desc, problem_space);
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
if (operation_desc.side_mode == SideMode::kLeft) {
// Input bytes read and Output bytes written for the trmm problem
result.bytes =
// Half matrix including the diagonal will have (M*(M+1))/2 elements
int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.m / 8) * (problem_.m + 1) / 2 +
int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.m / 8) * problem_.n +
int64_t(library::sizeof_bits(operation_desc.D.element) * problem_.m / 8) * problem_.n;
} else if (operation_desc.side_mode == SideMode::kRight) {
// Input bytes read and Output bytes written for the trmm problem
result.bytes =
// Half matrix including the diagonal will have (N*(N+1))/2 elements
int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.n / 8) * (problem_.n + 1) / 2 +
int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.m / 8) * problem_.n +
int64_t(library::sizeof_bits(operation_desc.D.element) * problem_.m / 8) * problem_.n;
}
// FLOPs = 2 * [ ( M * (M+1)/2 * N ) ] // Beta is zero
result.flops = problem_.m * (problem_.m + 1) * problem_.n;
result.runtime = 0;
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
result.flops *= 4;
break;
case library::MathOperationID::kMultiplyAddComplexFastF32:
result.flops *= 4;
break;
default: break;
}
}
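// Worked example of the performance model above (illustrative numbers only,
// not used by the profiler): for a left-side TRMM with M = 128, N = 256 and
// fp32 operands (32 bits = 4 bytes per element),
//   bytes = 4 * (128 * 129) / 2        // triangular A, including the diagonal
//         + 4 * 128 * 256              // B read
//         + 4 * 128 * 256              // D written
//         = 33024 + 131072 + 131072 = 295168
//   flops = 128 * (128 + 1) * 256 = 4227072
// Complex-valued math operations multiply the FLOP count by 4, as handled in
// the switch statement above.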
/// Initializes workspace
Status TrmmOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::TrmmDescription const &operation_desc =
static_cast<library::TrmmDescription const &>(operation->description());
if (options.execution_mode != ExecutionMode::kDryRun) {
int seed_shift = 0;
if (operation_desc.side_mode == SideMode::kLeft) {
trmm_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.m), int(problem_.m)},
{int(problem_.lda)},
1, // batch_count
seed_shift++
);
} else if (operation_desc.side_mode == SideMode::kRight) {
trmm_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.lda)},
1, // batch_count
seed_shift++
);
}
trmm_workspace_.B = device_context.allocate_tensor(
options,
"B",
operation_desc.B.element,
operation_desc.B.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldb)},
1, // batch_count
seed_shift++
);
trmm_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.D.element,
operation_desc.D.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldd)}
);
trmm_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.D.element,
operation_desc.D.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldd)}
);
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = operation->get_host_workspace_size(&trmm_workspace_.configuration);
trmm_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = operation->get_device_workspace_size(&trmm_workspace_.configuration);
trmm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = operation->initialize(
&trmm_workspace_.configuration,
trmm_workspace_.host_workspace.data(),
trmm_workspace_.device_workspace.data());
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kTrmm;
results_.back().disposition = Disposition::kNotRun;
for(auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool TrmmOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
// Initialize structure containing TRMM arguments
trmm_workspace_.arguments.A = trmm_workspace_.A->data();
trmm_workspace_.arguments.B = trmm_workspace_.B->data();
trmm_workspace_.arguments.D = trmm_workspace_.Computed->data();
trmm_workspace_.arguments.alpha = problem_.alpha.data();
trmm_workspace_.arguments.beta = problem_.beta.data();
trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Run the CUTLASS operation
//
results_.back().status = operation->run(
&trmm_workspace_.arguments,
trmm_workspace_.host_workspace.data(),
trmm_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
  // CUTLASS op ran but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUBLAS
if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
// Guard against unsupported cases
auto const & trmm_desc = static_cast<library::TrmmDescription const &>(operation->description());
if (cublas_satisfies(trmm_desc) == Status::kSuccess) {
// call cublas verification if supported
verify_with_cublas_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
else {
// set verification map for cublas to not supported
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUBLAS
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for(auto &m : results_.back().verification_map) {
if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if(is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
  // Returning true means profiling should continue
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool TrmmOperationProfiler::verify_with_cublas_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
#if CUTLASS_ENABLE_CUBLAS
library::TrmmDescription const &trmm_desc =
static_cast<library::TrmmDescription const &>(operation->description());
//
// Construct cuBLAS operators
//
CublasCreate handle;
cublasStatus_t status = handle.get_cublas_create_status();
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Initialize state
//
try {
//
// Construct dispatcher to cublas<t>Trmm()
//
// Initialize structure containing TRMM arguments
trmm_workspace_.arguments.A = trmm_workspace_.A->data();
trmm_workspace_.arguments.B = trmm_workspace_.B->data();
trmm_workspace_.arguments.D = trmm_workspace_.Reference->data();
trmm_workspace_.arguments.alpha = problem_.alpha.data();
trmm_workspace_.arguments.beta = problem_.beta.data();
trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
detail::cublasTrmmDispatcher trmm_op(
trmm_desc,
trmm_workspace_.configuration,
trmm_workspace_.arguments
);
if (trmm_op.status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
return true;
}
results_.back().status = Status::kSuccess;
status = trmm_op(handle);
// Handle errors
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
options,
*trmm_workspace_.Computed,
*trmm_workspace_.Reference
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
trmm_desc,
library::Provider::kCUTLASS,
library::Provider::kCUBLAS);
}
}
catch (...) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
}
#endif
  // Returning true means profiling should continue
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
bool TrmmOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing TRMM arguments
trmm_workspace_.arguments.A = trmm_workspace_.A->data();
trmm_workspace_.arguments.B = trmm_workspace_.B->data();
trmm_workspace_.arguments.D = trmm_workspace_.Computed->data();
trmm_workspace_.arguments.alpha = problem_.alpha.data();
trmm_workspace_.arguments.beta = problem_.beta.data();
trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&trmm_workspace_.arguments,
trmm_workspace_.host_workspace.data(),
trmm_workspace_.device_workspace.data()
);
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/src/trmm_operation_profiler.cu/0 | {
"file_path": "tools/profiler/src/trmm_operation_profiler.cu",
"repo_id": "tools",
"token_count": 8624
} | 61 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
/*! \file
\brief This header contains a class to parametrize a statistical distribution function.
*/
#include <ostream>
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Distribution type
struct Distribution {
/// Variant types
enum Kind { Invalid, Uniform, Gaussian, Identity, Sequential, AllZeros, AllOnes };
/// Distribution state
union {
/// Uniform distribution
struct {
double min;
double max;
} uniform;
/// Gaussian distribution
struct {
double mean;
double stddev;
double pnz;
double pnzA;
double pnzB;
double pnzC;
} gaussian;
    /// Elements are a linear combination of row and column index
struct {
double start;
double delta;
} sequential;
};
/// Active variant kind
Kind kind;
/// Random values are cast to integer after scaling by this power of two
int int_scale;
//
// Methods
//
Distribution() : kind(Invalid), int_scale(0) {}
/// Configures distribution as uniform random
Distribution &set_uniform(double _min, double _max, int _int_scale = 0) {
kind = Uniform;
uniform.min = _min;
uniform.max = _max;
int_scale = _int_scale;
return *this;
}
/// Configures distribution as Gaussian distribution
Distribution &set_gaussian(double _mean, double _stddev, int _int_scale = 0, double _pnz = 100.0) {
kind = Gaussian;
gaussian.mean = _mean;
gaussian.stddev = _stddev;
gaussian.pnz = _pnz;
int_scale = _int_scale;
return *this;
}
/// Sets identity
Distribution &set_identity() {
kind = Identity;
return *this;
}
/// Sets sequential
Distribution &set_sequential(double start, double delta, int _int_scale = 0) {
kind = Sequential;
sequential.start = start;
sequential.delta = delta;
int_scale = _int_scale;
return *this;
}
};
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Prints a Distribution to ostream
inline std::ostream &operator<<(std::ostream &out, cutlass::Distribution const &dist) {
switch (dist.kind) {
case cutlass::Distribution::Uniform:
out << "uniform, min: " << dist.uniform.min << ", max: " << dist.uniform.max;
break;
case cutlass::Distribution::Gaussian:
out << "gaussian, mean: " << dist.gaussian.mean << ", stddev: " << dist.gaussian.stddev
<< ", pnzA: " << dist.gaussian.pnzA << ", pnzB: "
<< dist.gaussian.pnzB << ", pnzC: " << dist.gaussian.pnzC;
break;
case cutlass::Distribution::Identity:
out << "identity";
break;
case cutlass::Distribution::Sequential:
out << "sequential";
break;
default:
out << "unknown";
}
out << ", int_scale: " << dist.int_scale;
return out;
}
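// Illustrative usage sketch (not part of this header; values are arbitrary):
// configure a distribution and stream it with the operator above.
//
//   #include "cutlass/util/distribution.h"
//   #include <iostream>
//
//   cutlass::Distribution dist;
//   dist.set_gaussian(/*mean=*/0.0, /*stddev=*/1.0, /*int_scale=*/0);
//   std::cout << dist << "\n";     // prints "gaussian, mean: 0, stddev: 1, ..."
//
//   dist.set_uniform(-4.0, 4.0);   // switches the active variant to Uniform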
////////////////////////////////////////////////////////////////////////////////////////////////////
| tools/util/include/cutlass/util/distribution.h/0 | {
"file_path": "tools/util/include/cutlass/util/distribution.h",
"repo_id": "tools",
"token_count": 1485
} | 62 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for complex-valued GEMM in device code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/complex.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_ref_planar_complex.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
namespace cutlass {
namespace reference {
namespace device {
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
////////////////////////////////////////////////////////////////////////////////////////////////////
static int const kGemmPlanarComplexBlockSize = 4;
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename ConvertOp = NumericConverter<ElementC, ScalarType>,
typename InnerProductOp = multiply_add<complex<ComputeType>>
>
__global__ void GemmPlanarComplex(
gemm::GemmCoord problem_size,
complex<ScalarType> alpha,
TensorRefPlanarComplex<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
TensorRefPlanarComplex<ElementB, LayoutB> tensor_b,
ComplexTransform transform_b,
complex<ScalarType> beta,
TensorRefPlanarComplex<ElementC, LayoutC> tensor_c,
TensorRefPlanarComplex<ElementC, LayoutC> tensor_d,
complex<ComputeType> initial_accum) {
int const kMblock = kGemmPlanarComplexBlockSize;
int const kNblock = kGemmPlanarComplexBlockSize;
using ComplexA = typename TensorRefPlanarComplex<ElementA, LayoutA>::ComplexElement;
using ComplexB = typename TensorRefPlanarComplex<ElementB, LayoutB>::ComplexElement;
using ComplexC = typename TensorRefPlanarComplex<ElementC, LayoutC>::ComplexElement;
// Note: batch is ignored.
int const M = problem_size.m();
int const N = problem_size.n();
int const K = problem_size.k();
ConvertOp convert_op;
InnerProductOp inner_product_op;
complex<ComputeType> accum[kMblock][kNblock];
int row_block = (blockIdx.x * blockDim.x + threadIdx.x) * kMblock;
int col_block = (blockIdx.y * blockDim.y + threadIdx.y) * kNblock;
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kNblock; j++) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kMblock; i++) {
accum[i][j] = initial_accum;
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int k_block = 0; k_block < K; ++k_block) {
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kNblock; j++) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kMblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < M && col < N) {
ComplexA a_ik = tensor_a.at(MatrixCoord(row, k_block));
ComplexB b_kj = tensor_b.at(MatrixCoord(k_block, col));
complex<ComputeType> a = complex<ComputeType>{
ComputeType(a_ik.real()),
ComputeType(a_ik.imag())
};
complex<ComputeType> b = complex<ComputeType>{
ComputeType(b_kj.real()),
ComputeType(b_kj.imag())
};
if (transform_a == ComplexTransform::kConjugate) {
a = conj(a);
}
if (transform_b == ComplexTransform::kConjugate) {
b = conj(b);
}
accum[i][j] = inner_product_op(a, b, accum[i][j]);
}
}
}
}
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kNblock; j++) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kMblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < M && col < N) {
complex<ScalarType> acc{
ScalarType(accum[i][j].real()),
ScalarType(accum[i][j].imag())
};
ComplexC c_ij = ComplexC();
if (beta.real() != ScalarType() || beta.imag() != ScalarType()) {
c_ij = tensor_c.at(coord);
}
complex<ScalarType> src{
ScalarType(c_ij.real()),
ScalarType(c_ij.imag())
};
complex<ScalarType> result = alpha * acc + beta * src;
ComplexC d_ij;
d_ij.real() = convert_op(result.real());
d_ij.imag() = convert_op(result.imag());
tensor_d.at(coord) = d_ij;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// Explicitly naming types needed by this template can be cumbersome, particularly for the
/// accumulator type, so a function argument 'initial_accum' is exposed. Passing
/// AccumulatorType(0) as the last function argument can be easier than naming all template
/// arguments explicitly.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename ConvertOp = NumericConverter<ElementC, ScalarType>,
typename InnerProductOp = multiply_add<complex<ComputeType>>
>
void GemmPlanarComplex(
gemm::GemmCoord problem_size,
complex<ScalarType> alpha,
TensorRefPlanarComplex<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
TensorRefPlanarComplex<ElementB, LayoutB> tensor_b,
ComplexTransform transform_b,
complex<ScalarType> beta,
TensorRefPlanarComplex<ElementC, LayoutC> tensor_c,
TensorRefPlanarComplex<ElementC, LayoutC> tensor_d,
complex<ComputeType> initial_accum) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
int const kMblock = kernel::kGemmPlanarComplexBlockSize;
int const kNblock = kernel::kGemmPlanarComplexBlockSize;
dim3 block(16, 8);
dim3 grid(
(problem_size.m() + block.x * kMblock - 1) / (block.x * kMblock),
(problem_size.n() + block.y * kNblock - 1) / (block.y * kNblock),
1);
kernel::GemmPlanarComplex<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ScalarType,
ComputeType,
ConvertOp,
InnerProductOp
><<< grid, block >>>(
problem_size,
alpha,
tensor_a,
transform_a,
tensor_b,
transform_b,
beta,
tensor_c,
tensor_d,
initial_accum
);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// This assumes the accumulator type is the same type as the scalars.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType
>
void GemmPlanarComplex(
gemm::GemmCoord problem_size,
complex<ScalarType> alpha,
TensorRefPlanarComplex<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
TensorRefPlanarComplex<ElementB, LayoutB> tensor_b,
ComplexTransform transform_b,
complex<ScalarType> beta,
TensorRefPlanarComplex<ElementC, LayoutC> tensor_c,
TensorRefPlanarComplex<ElementC, LayoutC> tensor_d) {
GemmPlanarComplex(
problem_size,
alpha,
tensor_a, transform_a,
tensor_b, transform_b,
beta,
tensor_c,
tensor_d,
complex<ScalarType>());
}
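// Illustrative usage sketch of the convenience overload above (ref_A, ref_B,
// ref_C, ref_D, m, n, k are placeholders; valid device-resident
// TensorRefPlanarComplex objects and fp32 scalars are assumed):
//
//   cutlass::gemm::GemmCoord problem(m, n, k);
//   cutlass::complex<float> alpha(1.0f, 0.0f);
//   cutlass::complex<float> beta(0.0f, 0.0f);
//
//   cutlass::reference::device::GemmPlanarComplex(
//     problem, alpha,
//     ref_A, cutlass::ComplexTransform::kNone,
//     ref_B, cutlass::ComplexTransform::kNone,
//     beta, ref_C, ref_D);   // accumulator type equals the scalar type here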
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| tools/util/include/cutlass/util/reference/device/gemm_planar_complex.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/device/gemm_planar_complex.h",
"repo_id": "tools",
"token_count": 3432
} | 63 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cmath>
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/util/reference/detail/linear_to_coordinate.h"
#include "cutlass/core_io.h"
namespace cutlass {
namespace reference {
namespace host {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Transform-reduce operation over the elements of a tensor, computed sequentially on the host
template <
typename Element,
typename Layout,
typename ComputeType,
typename ReduceOp,
typename TransformOp
>
ComputeType TensorTransformReduce(
TensorView<Element, Layout> view,
ComputeType identity,
ReduceOp reduce,
TransformOp transform
) {
for (int64_t idx = 0; idx < int64_t(view.size()); ++idx) {
typename Layout::TensorCoord coord;
cutlass::reference::detail::LinearToCoordinate<Layout::kRank>()(coord, idx, view.extent());
if (view.contains(coord)) {
Element x = view.at(coord);
identity = reduce(identity, transform(x));
}
}
return identity;
}
/// Transform-reduce operation over the elements of two tensors, computed sequentially on the host
template <
typename Element,
typename Layout,
typename ComputeType,
typename ReduceOp,
typename TransformOp
>
ComputeType TensorTransformReduce(
TensorView<Element, Layout> view_A,
TensorView<Element, Layout> view_B,
ComputeType identity,
ReduceOp reduce,
TransformOp transform) {
if (view_A.extent() != view_B.extent()) {
throw std::runtime_error("Tensor extents must match.");
}
for (int64_t idx = 0; idx < int64_t(view_A.size()); ++idx) {
typename Layout::TensorCoord coord;
cutlass::reference::detail::LinearToCoordinate<Layout::kRank>()(coord, idx, view_A.extent());
if (view_A.contains(coord)) {
Element a = view_A.at(coord);
Element b = view_B.at(coord);
identity = reduce(identity, transform(a, b));
}
}
return identity;
}
/// Helper to compute the sum of the elements of a tensor
template <
typename Element,
typename Layout,
typename ComputeType = Element
>
ComputeType TensorSum(
TensorView<Element, Layout> view,
ComputeType identity = ComputeType()
) {
plus<ComputeType> reduce;
NumericConverter<ComputeType, Element> transform;
return TensorTransformReduce(
view, identity, reduce, transform);
}
/// Helper to compute the sum of the squares of the elements of a tensor
template <
typename Element,
typename Layout,
typename ComputeType = Element
>
ComputeType TensorSumSq(
TensorView<Element, Layout> view,
ComputeType identity = ComputeType()
) {
plus<ComputeType> reduce;
magnitude_squared<Element, ComputeType> transform;
return TensorTransformReduce(
view, identity, reduce, transform);
}
/// Helper to compute the norm of the elements of a tensor.
template <
typename Element,
typename Layout,
typename ComputeType = double
>
ComputeType TensorNorm(
TensorView<Element, Layout> view,
ComputeType identity = ComputeType()
) {
return std::sqrt(TensorSumSq(view, identity));
}
/// Helper to compute the sum of the squares of the differences of two tensors
template <
typename Element,
typename Layout,
typename ComputeType = double
>
ComputeType TensorSumSqDiff(
TensorView<Element, Layout> view_A,
TensorView<Element, Layout> view_B,
ComputeType identity = ComputeType()
) {
plus<ComputeType> reduce;
magnitude_squared_difference<Element, ComputeType> transform;
return TensorTransformReduce(
view_A, view_B, identity, reduce, transform);
}
/// Helper to compute the norm of the tensor computed as the difference of two tensors in memory
template <
typename Element,
typename Layout,
typename ComputeType = double
>
ComputeType TensorNormDiff(
TensorView<Element, Layout> view_A,
TensorView<Element, Layout> view_B,
ComputeType identity = ComputeType()
) {
return std::sqrt(TensorSumSqDiff(view_A, view_B, identity));
}
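// Illustrative usage sketch (view_test and view_ref are placeholder names for
// host-accessible TensorViews of identical extent): compute a relative error
// from the helpers above.
//
//   double ref_norm = cutlass::reference::host::TensorNorm(view_ref);
//   double abs_err  = cutlass::reference::host::TensorNormDiff(view_test, view_ref);
//   double rel_err  = (ref_norm > 0.0) ? abs_err / ref_norm : abs_err;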
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/util/include/cutlass/util/reference/host/tensor_reduce.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/tensor_reduce.h",
"repo_id": "tools",
"token_count": 1810
} | 64 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS layout visualization example
*/
#include <map>
#include <memory>
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm70.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "visualize_layout.h"
#include "register_layout.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
void RegisterLayouts(std::map<std::string, std::unique_ptr<VisualizeLayoutBase> > &layouts) {
struct {
char const *name;
VisualizeLayoutBase *ptr;
} layout_pairs[] = {
{"PitchLinear", new VisualizeLayout<cutlass::layout::PitchLinear>},
{"ColumnMajor", new VisualizeLayout<cutlass::layout::ColumnMajor>},
{"RowMajor", new VisualizeLayout<cutlass::layout::RowMajor>},
{"ColumnMajorInterleaved<4>",
new VisualizeLayout<cutlass::layout::ColumnMajorInterleaved<4>>},
{"RowMajorInterleaved<4>",
new VisualizeLayout<cutlass::layout::RowMajorInterleaved<4>>},
      // All Ampere/Turing H/Integer matrix multiply tensor core kernels use the same swizzling
// layout implementation with different templates.
//
// mma.sync.aligned.m8n8k128.s32.b1.b1.s32 Interleaved-256
// mma.sync.aligned.m16n8k256.s32.b1.b1.s32 Interleaved-256
{"TensorOpMultiplicand<1,256>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<1, 256>>},
// mma.sync.aligned.m8n8k128.s32.b1.b1.s32 TN kblock512
// mma.sync.aligned.m16n8k256.s32.b1.b1.s32 TN kblock512
{"TensorOpMultiplicand<1,512>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<1, 512>>},
// mma.sync.aligned.m16n8k256.s32.b1.b1.s32 TN kblock1024
{"TensorOpMultiplicand<1,1024>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<1, 1024>>},
// Integer matrix multiply.int4 8832 Interleaved-64
// Integer matrix multiply.int4 16864 Interleaved-64
{"TensorOpMultiplicand<4,64>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<4, 64>>},
// Integer matrix multiply.int4 8832 TN kblock128
// Integer matrix multiply.int4 16864 TN kblock128
{"TensorOpMultiplicand<4,128>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<4, 128>>},
// Integer matrix multiply.int4 16864 TN kblock256
{"TensorOpMultiplicand<4,256>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<4, 256>>},
// Integer matrix multiply 8816 Interleaved-32
// Integer matrix multiply 16832 Interleaved-32
{"TensorOpMultiplicand<8,32>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<8, 32>>},
// Integer matrix multiply 8816 TN kblock64
// Integer matrix multiply 16832 TN kblock64
{"TensorOpMultiplicand<8,64>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<8, 64>>},
// Integer matrix multiply 16832 TN kblock128
{"TensorOpMultiplicand<8,128>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<8, 128>>},
// Matrix Multiply 1688 TN kblock32
// Matrix multiply 16816 TN kblock32
{"TensorOpMultiplicand<16,32>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<16, 32>>},
// Matrix multiply 1688 NT
// Matrix multiply 16816 NT
// Matrix multiply 16816 TN kblock64
{"TensorOpMultiplicand<16,64>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<16, 64>>},
// Matrix multiply 1688.TF32 TN kblock16
{"TensorOpMultiplicand<32,16>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<32, 16>>},
// Matrix multiply 1688.TF32 TN kblock32
{"TensorOpMultiplicand<32,32>",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<32, 32>>},
// Matrix multiply 1688 NT
{"TensorOpMultiplicandCongruous<32,32>",
new VisualizeLayout<
cutlass::layout::TensorOpMultiplicandCongruous<32, 32>>},
// Matrix multiply 884 NT
{"TensorOpMultiplicandCongruous<64,16>",
new VisualizeLayout<
cutlass::layout::TensorOpMultiplicandCongruous<64, 16>>},
// Matrix multiply 884 TN
{"TensorOpMultiplicand64bCrosswise",
new VisualizeLayout<cutlass::layout::TensorOpMultiplicand64bCrosswise>},
{"TensorOpMultiplicandCongruous<128,4>",
new VisualizeLayout<
cutlass::layout::TensorOpMultiplicandCongruous<128, 4>>},
{"TensorOpMultiplicandCrosswise<128,4>",
new VisualizeLayout<
cutlass::layout::TensorOpMultiplicandCrosswise<128, 4>>},
{"VoltaTensorOpMultiplicandCongruous<16>",
new VisualizeLayout<
cutlass::layout::VoltaTensorOpMultiplicandCongruous<16>>},
{"VoltaTensorOpMultiplicandCrosswise<16,32>",
new VisualizeLayout<
cutlass::layout::VoltaTensorOpMultiplicandCrosswise<16, 32>>}
};
for (auto layout : layout_pairs) {
layouts.emplace(std::string(layout.name), std::unique_ptr<VisualizeLayoutBase>(layout.ptr));
}
}
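// Illustrative usage sketch: build the registry and select a layout by its
// registered name (the VisualizeLayoutBase interface is defined in
// visualize_layout.h and is not reproduced here):
//
//   std::map<std::string, std::unique_ptr<VisualizeLayoutBase>> layouts;
//   RegisterLayouts(layouts);
//
//   auto it = layouts.find("TensorOpMultiplicand<16,64>");
//   if (it != layouts.end()) {
//     // it->second points at the visualizer for this swizzled layout
//   }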
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/03_visualize_layout/register_layout.cu/0 | {
"file_path": "examples/03_visualize_layout/register_layout.cu",
"repo_id": "examples",
"token_count": 2565
} | 0 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/semaphore.h"
#include "kernel/b2b_gemm_grouped_problem_visitor.h"
#include "threadblock/grouped_threadblock_swizzle.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
namespace detail {
/// Utility struct for returning the type of the problem visitor used by the swizzling function,
/// if it is a grouped swizzling function, or a default visitor. This is used only for defining
/// the parameters of the problem visitor used in GroupedParams.
template <
typename B2bMma_,
typename ThreadblockSwizzle_,
typename Enable = void
>
struct ProblemVisitorOrDefault;
/// Return a generic problem visitor for GEMM problems
template <
typename B2bMma_,
typename ThreadblockSwizzle_
>
struct ProblemVisitorOrDefault<B2bMma_,
ThreadblockSwizzle_,
typename platform::enable_if<
! cutlass::gemm::threadblock::detail::IsGroupedSwizzle<ThreadblockSwizzle_>::value
>::type> {
using value = B2bGemmGroupedProblemVisitor<typename B2bMma_::Shape,
GroupScheduleMode::kDeviceOnly,
128,
128,
platform::is_same<typename B2bMma_::LayoutC,
cutlass::layout::ColumnMajor>::value>;
};
/// Return the problem visitor specified by the swizzling function
template <
typename B2bMma_,
typename ThreadblockSwizzle_
>
struct ProblemVisitorOrDefault<B2bMma_,
ThreadblockSwizzle_,
typename platform::enable_if<
cutlass::gemm::threadblock::detail::IsGroupedSwizzle<ThreadblockSwizzle_>::value
>::type> {
using value = typename ThreadblockSwizzle_::ProblemVisitor;
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename B2bMma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct B2bGemm {
using B2bMma = B2bMma_;
using Epilogue = Epilogue_;
using OutputOp0 = typename B2bMma::OutputOp;
using OutputOp1 = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA0 = typename B2bMma::IteratorA0::Element;
using LayoutA0 = typename B2bMma::IteratorA0::Layout;
using ElementB0 = typename B2bMma::IteratorB0::Element;
using LayoutB0 = typename B2bMma::IteratorB0::Layout;
using ElementB1 = typename B2bMma::IteratorB1::Element;
using LayoutB1 = typename B2bMma::IteratorB1::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
using ScaleBiasData = typename B2bMma::IteratorAccumulatorScaleBias::Element;
/// Data types needed for higher-level containers. In some cases, a single type must be exposed
/// despite the B2b GEMM using two GEMMs under the hood. In such cases, we select the values from
/// the second GEMM (other than for ElementA/ElementB)
using ElementA = typename B2bMma::IteratorA0::Element;
using LayoutA = typename B2bMma::IteratorA0::Layout;
using ElementB = typename B2bMma::IteratorB0::Element;
using LayoutB = typename B2bMma::IteratorB0::Layout;
static ComplexTransform const kTransformA = B2bMma::kTransformA;
static ComplexTransform const kTransformB = B2bMma::kTransformB;
using Operator = typename B2bMma::Operator0;
using OperatorClass = typename Operator::OperatorClass;
using ThreadblockShape = typename B2bMma::Shape0;
using WarpShape = typename Operator::Shape;
using InstructionShape = typename Operator::InstructionShape;
using ArchTag = typename B2bMma::ArchTag;
static int const kStages = B2bMma::kStages;
static int const kAlignmentA = B2bMma::IteratorA::AccessType::kElements;
static int const kAlignmentB = B2bMma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
using Mma = B2bMma;
using EpilogueOutputOp = OutputOp1;
/// Warp count (concept: GemmShape)
using WarpCount0 = typename B2bMma::WarpCount0;
static int const kThreadCount = 32 * WarpCount0::kCount;
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm;
GemmCoord problem_size_0{0,0,0};
GemmCoord problem_size_1{0,0,0};
typename B2bMma::IteratorA0::TensorRef ref_A0{};
typename B2bMma::IteratorB0::TensorRef ref_B0{};
typename Epilogue::OutputTileIterator::TensorRef ref_C0{};
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Scale0{};
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Bias0{};
typename B2bMma::IteratorB1::TensorRef ref_B1{};
typename Epilogue::OutputTileIterator::TensorRef ref_C1{};
typename Epilogue::OutputTileIterator::TensorRef ref_D1{};
int64_t batch_stride_A0{0};
int64_t batch_stride_B0{0};
int64_t batch_stride_B1{0};
int64_t batch_stride_C1{0};
int64_t batch_stride_D1{0};
int64_t batch_stride_Bias0{0};
int64_t batch_stride_Scale0{0};
typename OutputOp0::Params epilogue0 {};
typename OutputOp1::Params epilogue1 {};
int batch_count{1};
//
// Methods
//
/// Default ctor
Arguments() = default;
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
GemmUniversalMode mode_,
GemmCoord problem_size_0_,
GemmCoord problem_size_1_,
typename B2bMma::IteratorA0::TensorRef ref_A0_,
typename B2bMma::IteratorB0::TensorRef ref_B0_,
typename Epilogue::OutputTileIterator::TensorRef ref_C0_,
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Scale0_,
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Bias0_,
typename B2bMma::IteratorB1::TensorRef ref_B1_,
typename Epilogue::OutputTileIterator::TensorRef ref_C1_,
typename Epilogue::OutputTileIterator::TensorRef ref_D1_,
int64_t batch_stride_A0_,
int64_t batch_stride_B0_,
int64_t batch_stride_B1_,
int64_t batch_stride_C1_,
int64_t batch_stride_D1_,
int64_t batch_stride_Bias0_,
int64_t batch_stride_Scale0_,
typename OutputOp0::Params epilogue0_ = typename OutputOp0::Params(),
typename OutputOp1::Params epilogue1_ = typename OutputOp1::Params(),
int batch_count_ = 1
):
mode(mode_),
problem_size_0(problem_size_0_),
problem_size_1(problem_size_1_),
ref_A0(ref_A0_),
ref_B0(ref_B0_),
ref_C0(ref_C0_),
ref_Scale0(ref_Scale0_),
ref_Bias0(ref_Bias0_),
ref_B1(ref_B1_),
ref_C1(ref_C1_),
ref_D1(ref_D1_),
batch_stride_A0(batch_stride_A0_),
batch_stride_B0(batch_stride_B0_),
batch_stride_B1(batch_stride_B1_),
batch_stride_C1(batch_stride_C1_),
batch_stride_D1(batch_stride_D1_),
batch_stride_Bias0(batch_stride_Bias0_),
batch_stride_Scale0(batch_stride_Scale0_),
epilogue0(epilogue0_),
epilogue1(epilogue1_),
batch_count(batch_count_) {
}
};
// Arguments structure for grouped B2B problems
struct GroupedArguments {
GemmCoord* problem_size_0;
GemmCoord* problem_size_1;
typename B2bMma::IteratorA0::TensorRef* ref_A0;
typename B2bMma::IteratorB0::TensorRef* ref_B0;
typename Epilogue::OutputTileIterator::TensorRef* ref_C0;
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Scale0;
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Bias0;
typename B2bMma::IteratorB1::TensorRef* ref_B1;
typename Epilogue::OutputTileIterator::TensorRef* ref_C1;
typename Epilogue::OutputTileIterator::TensorRef* ref_D1;
    // Epilogue params remain constant across all problems in the group. Thus,
// the parameter here is not a pointer.
typename OutputOp0::Params epilogue0;
typename OutputOp1::Params epilogue1;
int problem_count;
int threadblock_count;
GemmCoord* host_problem_sizes;
CUTLASS_HOST_DEVICE
GroupedArguments(
int problem_count,
GemmCoord* problem_size_0_,
GemmCoord* problem_size_1_,
typename B2bMma::IteratorA0::TensorRef* ref_A0_,
typename B2bMma::IteratorB0::TensorRef* ref_B0_,
typename Epilogue::OutputTileIterator::TensorRef* ref_C0_,
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Scale0_,
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Bias0_,
typename B2bMma::IteratorB1::TensorRef* ref_B1_,
typename Epilogue::OutputTileIterator::TensorRef* ref_C1_,
typename Epilogue::OutputTileIterator::TensorRef* ref_D1_,
typename OutputOp0::Params epilogue0_ = typename OutputOp0::Params(),
typename OutputOp1::Params epilogue1_ = typename OutputOp1::Params(),
int threadblock_count = 0
) : problem_size_0(problem_size_0_), problem_size_1(problem_size_1_),
ref_A0(ref_A0_), ref_B0(ref_B0_), ref_C0(ref_C0_),
ref_Scale0(ref_Scale0_), ref_Bias0(ref_Bias0_), ref_B1(ref_B1_),
ref_C1(ref_C1_), ref_D1(ref_D1_), epilogue0(epilogue0_), epilogue1(epilogue1_),
problem_count(problem_count),
threadblock_count(threadblock_count)
{}
};
/// Parameters structure
struct Params {
cutlass::gemm::GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm;
cutlass::gemm::GemmCoord problem_size_0{};
cutlass::gemm::GemmCoord problem_size_1{};
cutlass::gemm::GemmCoord grid_tiled_shape{};
int swizzle_log_tile{0};
typename B2bMma::IteratorA0::Params params_A0{};
typename B2bMma::IteratorA0::TensorRef ref_A0{};
typename B2bMma::IteratorB0::Params params_B0{};
typename B2bMma::IteratorB0::TensorRef ref_B0{};
typename Epilogue::OutputTileIterator::Params params_C0{};
typename Epilogue::OutputTileIterator::TensorRef ref_C0{};
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Scale0{};
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Bias0{};
typename B2bMma::IteratorB1::Params params_B1{};
typename B2bMma::IteratorB1::TensorRef ref_B1{};
typename Epilogue::OutputTileIterator::Params params_C1{};
typename Epilogue::OutputTileIterator::TensorRef ref_C1{};
typename Epilogue::OutputTileIterator::Params params_D1{};
typename Epilogue::OutputTileIterator::TensorRef ref_D1{};
typename OutputOp0::Params output_op_0{};
typename OutputOp1::Params output_op_1{};
int64_t batch_stride_A0{0};
int64_t batch_stride_B0{0};
int64_t batch_stride_B1{0};
int64_t batch_stride_C1{0};
int64_t batch_stride_D1{0};
int64_t batch_stride_Bias0{0};
int64_t batch_stride_Scale0{0};
int *semaphore = nullptr;
int gemm_k_iterations_0{0};
int gemm_k_size_0{0};
int gemm_k_iterations_1{0};
int gemm_k_size_1{0};
//
// Methods
//
Params() = default;
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmUniversalMode mode,
cutlass::gemm::GemmCoord const & problem_size_0,
cutlass::gemm::GemmCoord const & problem_size_1,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
typename B2bMma::IteratorA0::TensorRef ref_A0,
typename B2bMma::IteratorB0::TensorRef ref_B0,
typename Epilogue::OutputTileIterator::TensorRef ref_C0,
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Scale0,
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Bias0,
typename B2bMma::IteratorB1::TensorRef ref_B1,
typename Epilogue::OutputTileIterator::TensorRef ref_C1,
typename Epilogue::OutputTileIterator::TensorRef ref_D1,
int64_t batch_stride_A0,
int64_t batch_stride_B0,
int64_t batch_stride_B1,
int64_t batch_stride_C1,
int64_t batch_stride_D1,
int64_t batch_stride_Bias0,
int64_t batch_stride_Scale0,
typename OutputOp0::Params output_op_0 = typename OutputOp0::Params(),
typename OutputOp1::Params output_op_1 = typename OutputOp1::Params(),
int *workspace = nullptr
):
mode(mode),
problem_size_0(problem_size_0),
problem_size_1(problem_size_1),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle::get_log_tile(grid_tiled_shape)),
params_A0(ref_A0.layout()),
ref_A0(ref_A0),
params_B0(ref_B0.layout()),
ref_B0(ref_B0),
params_C0(ref_C0.layout()),
ref_C0(ref_C0),
ref_Scale0(ref_Scale0),
ref_Bias0(ref_Bias0),
params_B1(ref_B1.layout()),
ref_B1(ref_B1),
params_C1(ref_C1.layout()),
ref_C1(ref_C1),
params_D1(ref_D1.layout()),
ref_D1(ref_D1),
batch_stride_A0(batch_stride_A0),
batch_stride_B0(batch_stride_B0),
batch_stride_B1(batch_stride_B1),
batch_stride_C1(batch_stride_C1),
batch_stride_D1(batch_stride_D1),
batch_stride_Bias0(batch_stride_Bias0),
batch_stride_Scale0(batch_stride_Scale0),
output_op_0(output_op_0),
output_op_1(output_op_1) {
int total_gemm_k_iterations_0 = (problem_size_0.k() + B2bMma::Shape0::kK - 1) / B2bMma::Shape0::kK;
int gemm_k_iterations_0 = (total_gemm_k_iterations_0 + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();
gemm_k_size_0 = gemm_k_iterations_0 * B2bMma::Shape0::kK;
int total_gemm_k_iterations_1 = (problem_size_1.k() + B2bMma::Shape1::kK - 1) / B2bMma::Shape1::kK;
int gemm_k_iterations_1 = (total_gemm_k_iterations_1 + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();
gemm_k_size_1 = gemm_k_iterations_1 * B2bMma::Shape1::kK;
semaphore = workspace;
}
};
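  // Worked example of the split-K sizing above (illustrative numbers): with
  // B2bMma::Shape0::kK = 32, problem_size_0.k() = 96 and grid_tiled_shape.k() = 2,
  //   total_gemm_k_iterations_0 = (96 + 32 - 1) / 32 = 3
  //   gemm_k_iterations_0       = (3 + 2 - 1) / 2    = 2
  //   gemm_k_size_0             = 2 * 32             = 64
  // so the threadblock at k-index 0 covers K in [0, 64) and the threadblock at
  // k-index 1 covers the remaining [64, 96), matching the clamping performed in
  // run_with_swizzle().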
struct GroupedParams {
cutlass::gemm::GemmCoord* problem_size_0;
cutlass::gemm::GemmCoord* problem_size_1;
cutlass::gemm::GemmCoord* grid_tiled_shape;
typename B2bMma::IteratorA0::TensorRef* ref_A0;
typename B2bMma::IteratorB0::TensorRef* ref_B0;
typename Epilogue::OutputTileIterator::TensorRef* ref_C0;
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Scale0;
typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Bias0;
typename B2bMma::IteratorB1::TensorRef* ref_B1;
typename Epilogue::OutputTileIterator::TensorRef* ref_C1;
typename Epilogue::OutputTileIterator::TensorRef* ref_D1;
    // Epilogue params remain constant across all problems in the group. Thus,
// the parameter here is not a pointer.
typename OutputOp0::Params output_op_0;
typename OutputOp1::Params output_op_1;
using ProblemVisitor = typename detail::ProblemVisitorOrDefault<B2bMma, ThreadblockSwizzle>::value;
typename ProblemVisitor::Params problem_visitor;
int threadblock_count;
int* workspace;
CUTLASS_HOST_DEVICE
GroupedParams() {}
CUTLASS_HOST_DEVICE
GroupedParams(
GroupedArguments const &args,
void *workspace = nullptr,
int tile_count = 0
) :
problem_size_0(args.problem_size_0), problem_size_1(args.problem_size_1),
ref_A0(args.ref_A0), ref_B0(args.ref_B0), ref_C0(args.ref_C0),
ref_Scale0(args.ref_Scale0), ref_Bias0(args.ref_Bias0), ref_B1(args.ref_B1), ref_C1(args.ref_C1), ref_D1(args.ref_D1),
output_op_0(args.epilogue0), output_op_1(args.epilogue1),
problem_visitor(args.problem_size_0, args.problem_size_1, args.problem_count, workspace, tile_count),
threadblock_count(args.threadblock_count),
workspace(reinterpret_cast<int*>(workspace)) {}
CUTLASS_HOST_DEVICE
void transpose() {
// Only row-major outputs are currently supported, so no transpose is performed
}
    /// Returns non-grouped parameters to be used as input to the kernel-level
/// operator for the problem indicated by problem_visitor.
CUTLASS_HOST_DEVICE
Params to_single_params(const ProblemVisitor& problem_visitor) const {
GemmCoord problem_size0 = problem_visitor.problem_size0();
GemmCoord problem_size1 = problem_visitor.problem_size1();
int32_t idx = problem_visitor.problem_index();
GemmCoord grid_shape = problem_visitor.grid_shape(problem_size1);
return Params(
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size0,
problem_size1,
grid_shape,
ref_A0[idx],
ref_B0[idx],
ref_C0[idx],
ref_Scale0[idx],
ref_Bias0[idx],
ref_B1[idx],
ref_C1[idx],
ref_D1[idx],
0, 0, 0, 0, 0, 0, 0, // Batched B2B GEMMs within the grouped kernel are currently unsupported
output_op_0,
output_op_1,
workspace
);
}
};
/// Shared memory storage structure
union SharedStorage {
typename B2bMma::B2bMmaSharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
B2bGemm() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size_0,
cutlass::gemm::GemmCoord const & problem_size_1,
typename B2bMma::IteratorA0::TensorRef ref_A0,
typename B2bMma::IteratorB0::TensorRef ref_B0,
typename Epilogue::OutputTileIterator::TensorRef ref_C0,
typename B2bMma::IteratorB1::TensorRef ref_B1,
typename Epilogue::OutputTileIterator::TensorRef ref_C1,
typename Epilogue::OutputTileIterator::TensorRef ref_D1) {
static int const kAlignmentA = B2bMma::IteratorA0::AccessType::kElements;
static int const kAlignmentB = B2bMma::IteratorB0::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
if (!TensorRef_aligned(ref_A0, kAlignmentA)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_B0, kAlignmentB)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_C0, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_B1, kAlignmentB)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_C1, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_D1, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if ((problem_size_0.m() % kAlignmentA) || (problem_size_0.k() % kAlignmentA) ||
(problem_size_0.n() % kAlignmentB) || (problem_size_0.k() % kAlignmentB) ||
(problem_size_0.m() % kAlignmentC) || (problem_size_0.n() % kAlignmentC) ||
(problem_size_1.m() % kAlignmentA) || (problem_size_1.k() % kAlignmentA) ||
(problem_size_1.n() % kAlignmentB) || (problem_size_1.k() % kAlignmentB) ||
(problem_size_1.m() % kAlignmentC) || (problem_size_1.n() % kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
// Determine if fusion sizes are valid
if(problem_size_0.m() != problem_size_1.m())
return Status::kErrorInvalidProblem;
if(problem_size_0.n() != problem_size_1.k())
return Status::kErrorInvalidProblem;
if(problem_size_0.n() > B2bMma::Shape0::kN)
return Status::kErrorInvalidProblem;
if(problem_size_1.n() > B2bMma::Shape1::kN)
return Status::kErrorInvalidProblem;
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
ThreadblockSwizzle threadblock_swizzle;
run_with_swizzle(params, shared_storage, threadblock_swizzle);
}
/// Executes one GEMM with an externally-provided swizzling function
CUTLASS_DEVICE
void run_with_swizzle(Params const ¶ms, SharedStorage &shared_storage, ThreadblockSwizzle& threadblock_swizzle) {
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
ElementA0 *ptr_A0 = static_cast<ElementA0 *>(params.ref_A0.data());
ElementB0 *ptr_B0 = static_cast<ElementB0 *>(params.ref_B0.data());
ElementB1 *ptr_B1 = static_cast<ElementB1 *>(params.ref_B1.data());
ScaleBiasData *ptr_Bias0 = static_cast<ScaleBiasData *>(params.ref_Bias0.data());
ScaleBiasData *ptr_Scale0 = static_cast<ScaleBiasData *>(params.ref_Scale0.data());
int offset_k_0 = 0;
int offset_k_1 = 0;
int problem_size_k_0 = params.problem_size_0.k();
int problem_size_k_1 = params.problem_size_1.k();
if (params.mode == GemmUniversalMode::kGemm) {
// Problem size is a function of threadblock index in the K dimension
problem_size_k_0 = min(
problem_size_k_0,
(threadblock_tile_offset.k() + 1) * params.gemm_k_size_0);
// Problem size is a function of threadblock index in the K dimension
problem_size_k_1 = min(
problem_size_k_1,
(threadblock_tile_offset.k() + 1) * params.gemm_k_size_1);
offset_k_0 = threadblock_tile_offset.k() * params.gemm_k_size_0;
offset_k_1 = threadblock_tile_offset.k() * params.gemm_k_size_1;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A0 += threadblock_tile_offset.k() * params.batch_stride_A0;
ptr_B0 += threadblock_tile_offset.k() * params.batch_stride_B0;
ptr_B1 += threadblock_tile_offset.k() * params.batch_stride_B1;
ptr_Bias0 += threadblock_tile_offset.k() * params.batch_stride_Bias0;
ptr_Scale0 += threadblock_tile_offset.k() * params.batch_stride_Scale0;
}
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A0{
threadblock_tile_offset.m() * B2bMma::Shape0::kM,
offset_k_0,
};
cutlass::MatrixCoord tb_offset_B0{
offset_k_0,
threadblock_tile_offset.n() * B2bMma::Shape0::kN
};
cutlass::MatrixCoord tb_offset_B1{
offset_k_1,
threadblock_tile_offset.n() * B2bMma::Shape1::kN
};
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations_0 = (problem_size_k_0 - tb_offset_A0.column() + B2bMma::Shape0::kK - 1) / B2bMma::Shape0::kK;
// Compute threadblock-scoped matrix multiply-add
// int gemm_k_iterations_1 = (problem_size_k_1 - tb_offset_B1.row() + B2bMma::Shape1::kK - 1) / B2bMma::Shape1::kK;
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename B2bMma::IteratorA0 iterator_A0(
params.params_A0,
ptr_A0,
{params.problem_size_0.m(), problem_size_k_0},
thread_idx,
tb_offset_A0);
typename B2bMma::IteratorB0 iterator_B0(
params.params_B0,
ptr_B0,
{problem_size_k_0, params.problem_size_0.n()},
thread_idx,
tb_offset_B0);
typename B2bMma::IteratorB1 iterator_B1(
params.params_B1,
ptr_B1,
{problem_size_k_1, params.problem_size_1.n()},
thread_idx,
tb_offset_B1);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
// Construct iterators to accumulator scale/bias vector
typename B2bMma::IteratorAccumulatorScaleBias iterator_Scale0(
ptr_Scale0,
{1, params.problem_size_0.n()},
thread_idx,
warp_idx,
MatrixCoord(
0, threadblock_tile_offset.n() * B2bMma::Shape0::kN
)
);
typename B2bMma::IteratorAccumulatorScaleBias iterator_Bias0(
ptr_Bias0,
{1, params.problem_size_0.n()},
thread_idx,
warp_idx,
MatrixCoord(
0, threadblock_tile_offset.n() * B2bMma::Shape0::kN
)
);
//
// Main loop
//
OutputOp0 output_op_0(params.output_op_0);
if (cutlass::gemm::threadblock::detail::IsGroupedSwizzle<ThreadblockSwizzle>::value) {
// Wait for all threads to finish their epilogue phases from the previous tile.
__syncthreads();
}
// Construct thread-scoped matrix multiply
B2bMma b2bMma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx, params.problem_size_0.n());
typename B2bMma::FragmentC0 src_accum;
typename B2bMma::FragmentC1 accumulators;
src_accum.clear();
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
b2bMma(gemm_k_iterations_0, accumulators, iterator_A0, iterator_B0,
iterator_Scale0, iterator_Bias0, iterator_B1, src_accum, output_op_0);
//
// Epilogue
//
OutputOp1 output_op_1(params.output_op_1);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * B2bMma::Shape1::kM,
threadblock_tile_offset.n() * B2bMma::Shape1::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C1 = static_cast<ElementC *>(params.ref_C1.data());
ElementC *ptr_D1 = static_cast<ElementC *>(params.ref_D1.data());
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op_1.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C1 += threadblock_tile_offset.k() * params.batch_stride_C1;
ptr_D1 += threadblock_tile_offset.k() * params.batch_stride_D1;
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C1(
params.params_C1,
ptr_C1,
params.problem_size_1.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D1(
params.params_D1,
ptr_D1,
params.problem_size_1.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C1 = iterator_D1;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op_1, iterator_D1, accumulators, iterator_C1);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
__threadfence();
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| examples/13_two_tensor_op_fusion/kernel/b2b_gemm.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/kernel/b2b_gemm.h",
"repo_id": "examples",
"token_count": 12865
} | 1 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates how to call a CUTLASS SYRK kernel and provides a naive reference
  SYRK kernel to verify its correctness.
  The CUTLASS Syrk template is instantiated in the function CutlassSsyrkNN. This kernel computes
the symmetric rank-k update (SYRK) using double-precision floating-point arithmetic and assumes
all matrices have column-major layout.
  The threadblock tile size is chosen as 16x32x16, which offers good performance for large matrices.
See the CUTLASS Parallel for All blog post for more exposition on the tunable parameters available
in CUTLASS.
https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/
Aside from defining and launching the SSYRK kernel, this example does not use any other components
or utilities within CUTLASS. Such utilities are demonstrated elsewhere in other examples and are
prevalent in the CUTLASS unit tests.
*/
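
// As a quick reference for the math being computed and verified below (a sketch only, matching
// ReferenceSyrk_kernel() further down): for an N-by-K matrix A, the rank-k update is
//
//   C(i, j) = alpha * sum_k A(i, k) * A(j, k) + beta * C(i, j)    for i >= j,
//
// i.e. C = alpha * A * A^T + beta * C, with only the lower triangle of C updated
// (cutlass::FillMode::kLower).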
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// Helper methods to check for errors
#include "helper.h"
//
// CUTLASS includes needed for double-precision SYRK kernel
//
// Defines cutlass::gemm::device::Syrk, the generic Syrk computation template class.
#include "cutlass/gemm/device/rank_k.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// This function defines a CUTLASS SYRK kernel instantiation, constructs its parameters object,
// and launches it on the CUDA device.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS SYRK template and launch a SYRK kernel.
cudaError_t CutlassSsyrkNN(
int N,
int K,
double alpha,
double const *A,
int lda,
double beta,
double *C,
int ldc) {
  // Define a type for double-precision CUTLASS SYRK with column-major
// input matrices and 16x32x16 threadblock tile size (chosen by default).
//
// To keep the interface manageable, several helpers are defined for plausible compositions
// including the following example for double-precision SYRK. Typical values are used as
// default template arguments.
//
  // To view the full SYRK device API interface, see `cutlass/gemm/device/rank_k.h`
using ColumnMajor = cutlass::layout::ColumnMajor;
using CutlassSyrk = cutlass::gemm::device::RankK<
double,
ColumnMajor,
double,
ColumnMajor,
cutlass::FillMode::kLower,
double,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<16, 32, 16>,
cutlass::gemm::GemmShape<16, 16, 16>,
cutlass::gemm::GemmShape<8, 8, 4>,
cutlass::epilogue::thread::LinearCombination<
double,
1,
double,
double
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
5, // Stages
1, // AlignmentA
    false,                                    // SplitKSerial
cutlass::arch::OpMultiplyAdd,
cutlass::ComplexTransform::kNone,
cutlass::BlasMode::kSymmetric
>;
// Define a CUTLASS SYRK type
CutlassSyrk syrk_operator;
// Construct the CUTLASS SYRK arguments object.
//
// One of CUTLASS's design patterns is to define syrk argument objects that are constructible
// in host code and passed to kernels by value. These may include pointers, strides, scalars,
// and other arguments needed by Syrk and its components.
//
// The benefits of this pattern are (1.) a structured, composable strategy for passing host-constructible
// arguments to kernels and (2.) minimized initialization overhead on kernel entry.
//
CutlassSyrk::Arguments args(cutlass::gemm::GemmUniversalMode::kGemm,
{N, N, K}, // Syrk Problem dimensions
1, // batch_count,
{alpha, beta}, // Scalars used in the Epilogue
reinterpret_cast<void const *>(A),
const_cast<void *>(reinterpret_cast<void *>(C)),
reinterpret_cast<void *>(C), // destination matrix D (may be different memory than source C matrix)
(int64_t)N*K, // Batch strides
(int64_t)N*N,
(int64_t)N*N,
lda,
ldc,
ldc);
//
// Launch the CUTLASS SYRK kernel.
//
cutlass::Status status = syrk_operator(args);
//
// Return a cudaError_t if the CUTLASS SYRK operator returned an error code.
//
if (status != cutlass::Status::kSuccess) {
return cudaErrorUnknown;
}
// Return success, if no errors were encountered.
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// The source code after this point in the file is generic CUDA using the CUDA Runtime API
// and simple CUDA kernels to initialize matrices and compute the general matrix product.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize a matrix with small integers.
__global__ void InitializeMatrix_kernel(
double *matrix,
int ldm,
int rows,
int columns,
int seed = 0) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < rows && j < columns) {
int offset = i + j * ldm;
// Generate arbitrary elements.
int const k = 16807;
int const m = 16;
double value = double(((offset + seed) * k % m) - m / 2);
matrix[offset] = value;
}
}
/// Simple function to initialize a matrix to arbitrary small integers.
cudaError_t InitializeMatrix(double *matrix, int ldm, int rows, int columns, int seed = 0) {
dim3 block(16, 16);
dim3 grid(
(rows + block.x - 1) / block.x,
(columns + block.y - 1) / block.y
);
InitializeMatrix_kernel<<< grid, block >>>(matrix, ldm, rows, columns, seed);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocates device memory for a matrix then fills with arbitrary small integers.
cudaError_t AllocateMatrix(double **matrix, int ldm, int rows, int columns, int seed = 0) {
cudaError_t result;
size_t sizeof_matrix = sizeof(double) * ldm * columns;
// Allocate device memory.
result = cudaMalloc(reinterpret_cast<void **>(matrix), sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to allocate matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Clear the allocation.
result = cudaMemset(*matrix, 0, sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to clear matrix device memory: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Initialize matrix elements to arbitrary small integers.
result = InitializeMatrix(*matrix, ldm, rows, columns, seed);
if (result != cudaSuccess) {
std::cerr << "Failed to initialize matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
return result;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Naive reference SYRK computation.
__global__ void ReferenceSyrk_kernel(
int N,
int K,
double alpha,
double const *A,
int lda,
double beta,
double *C,
int ldc) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < N && j < N && i >= j ) { // Since C is in Lower Fill Mode
double accumulator = 0;
for (int k = 0; k < K; ++k) {
accumulator += A[i + k * lda] * A[j + k * lda];
}
C[i + j * ldc] = alpha * accumulator + beta * C[i + j * ldc];
}
}
/// Reference SYRK computation.
cudaError_t ReferenceSyrk(
int N,
int K,
double alpha,
double const *A,
int lda,
double beta,
double *C,
int ldc) {
dim3 block(16, 16);
dim3 grid(
(N + block.x - 1) / block.x,
(N + block.y - 1) / block.y
);
ReferenceSyrk_kernel<<< grid, block >>>(N, K, alpha, A, lda, beta, C, ldc);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a double-precision
/// CUTLASS SYRK kernel.
cudaError_t TestCutlassSyrk(int N, int K, double alpha, double beta) {
cudaError_t result;
//
// Define several matrices to be used as operands to SYRK kernels.
//
// Compute leading dimensions for each matrix.
int lda = N;
int ldc = N;
// Compute size in bytes of the C matrix.
size_t sizeof_C = sizeof(double) * ldc * N;
// Define pointers to matrices in GPU device memory.
double *A;
double *C_cutlass;
double *C_reference;
//
// Allocate matrices in GPU device memory with arbitrary seeds.
//
result = AllocateMatrix(&A, lda, N, K, 0);
if (result != cudaSuccess) {
return result;
}
result = AllocateMatrix(&C_cutlass, ldc, N, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
return result;
}
result = AllocateMatrix(&C_reference, ldc, N, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
cudaFree(C_cutlass);
return result;
}
result = cudaMemcpy(C_reference, C_cutlass, sizeof_C, cudaMemcpyDeviceToDevice);
if (result != cudaSuccess) {
std::cerr << "Failed to copy C_cutlass matrix to C_reference: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
return result;
}
//
// Launch CUTLASS SYRK.
//
result = CutlassSsyrkNN(N, K, alpha, A, lda, beta, C_cutlass, ldc);
if (result != cudaSuccess) {
std::cerr << "CUTLASS SYRK kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
return result;
}
//
// Verify.
//
// Launch reference SYRK
result = ReferenceSyrk(N, K, alpha, A, lda, beta, C_reference, ldc);
if (result != cudaSuccess) {
std::cerr << "Reference SYRK kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
return result;
}
// Copy to host and verify equivalence.
std::vector<double> host_cutlass(ldc * N, 0);
std::vector<double> host_reference(ldc * N, 0);
result = cudaMemcpy(host_cutlass.data(), C_cutlass, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy CUTLASS SYRK results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
return result;
}
result = cudaMemcpy(host_reference.data(), C_reference, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy Reference SYRK results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
return result;
}
//
// Free device memory allocations.
//
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
//
// Test for bit equivalence of results.
//
if (host_cutlass != host_reference) {
std::cerr << "CUTLASS results incorrect." << std::endl;
return cudaErrorUnknown;
}
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to basic_syrk example.
//
// usage:
//
// 00_basic_syrk <N> <K> <alpha> <beta>
//
int main(int argc, const char *arg[]) {
bool notSupported = false;
// CUTLASS must be compiled with CUDA 11 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "NVIDIA Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "This example requires compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
//
// Parse the command line to obtain SYRK dimensions and scalar values.
//
// SYRK problem dimensions.
int problem[2] = { 128, 128 };
for (int i = 1; i < argc && i < 3; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Scalars used for linear scaling the result of the matrix product.
double scalars[2] = { 1, 0 };
for (int i = 3; i < argc && i < 5; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 3];
}
//
// Run the CUTLASS SYRK test.
//
cudaError_t result = TestCutlassSyrk(
problem[0], // SYRK N dimension
problem[1], // SYRK K dimension
scalars[0], // alpha
scalars[1] // beta
);
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| examples/31_basic_syrk/basic_syrk.cu/0 | {
"file_path": "examples/31_basic_syrk/basic_syrk.cu",
"repo_id": "examples",
"token_count": 5258
} | 2 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief This file contains all functioning classes needed by GemmLayernorm.
GemmLayernorm example = GEMM0 with partial reduction fused in epilogue (EpilogueVisitorLayerNorm)
+ lightweight full reduction kernel (ApplyFinalReduction)
                       + GEMM1 with elementwise operations fused in mainloop (GemmLayernormMainloopFusion)
*/
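
// A sketch of the decomposition implemented below (notation only; eps = 1e-6 as used in
// ApplyFinalReduction). For each row x of the GEMM0 output with N columns:
//
//   * the epilogue visitor produces per-threadblock partial sums of sum(x)/N and sum(x^2)/N,
//   * ApplyFinalReduction accumulates them into E[x] and E[x^2] and stores
//
//       inv_std = 1 / sqrt(E[x^2] - E[x]^2 + eps)   (written to the Variance tensor)
//       neg_scaled_mean = -E[x] * inv_std           (written to the Mean tensor)
//
//   * GEMM1's mainloop fusion can then apply x * inv_std + neg_scaled_mean together with the
//     gamma/beta scale and bias.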
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <cmath>
#include <iostream>
#include <vector>
#include <limits>
#include "cutlass/cutlass.h"
#include "cutlass/arch/memory.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/device/gemm_layernorm_mainloop_fusion.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_complex.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/epilogue/threadblock/epilogue_with_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "gemm_with_epilogue_visitor.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementVariance_,
typename ElementMean_,
typename ElementLayernormCompute_,
typename ElementOutput,
typename ThreadblockShape_,
bool IsShiftedVariance_ = false
>
class ApplyFinalReduction {
public:
using ElementVariance = ElementVariance_;
using ElementMean = ElementMean_;
using ElementLayernormCompute = ElementLayernormCompute_;
using ThreadblockShape = ThreadblockShape_;
  // Pre-processing has ensured the layout is equivalent to RowMajor
using Layout = cutlass::layout::RowMajor;
using TensorVariance = TensorRef<ElementVariance, Layout>;
using TensorMean = TensorRef<ElementMean, Layout>;
static bool const kIsShiftedVariance = IsShiftedVariance_;
//
// Arguments
//
struct Arguments {
MatrixCoord extent; ///< Extent of D and Layernorm matrices
TensorVariance ref_Variance; ///< Sum Square or Variance tensor (input / output)
TensorMean ref_Mean; ///< Sum or Mean tensor (input / output)
ElementOutput *ptr_Shifted_K; ///< Shifted K tensor pointer
//
// Methods
//
Arguments(){ }
Arguments(
MatrixCoord extent_,
TensorVariance ref_Variance_,
TensorMean ref_Mean_,
ElementOutput *ptr_Shifted_K_
):
extent(extent_),
ref_Variance(ref_Variance_),
ref_Mean(ref_Mean_),
ptr_Shifted_K(ptr_Shifted_K_)
{
}
};
struct SharedStorage {
};
//
// Params struct
//
struct Params {
Arguments args;
//
// Methods
//
Params() { }
Params(Arguments const &args_): args(args_) { }
};
private:
public:
CUTLASS_DEVICE
ApplyFinalReduction() { }
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
apply(params, shared_storage);
}
private:
  /// Performs the final reduction over the per-threadblock partial sums
CUTLASS_DEVICE
void apply(Params const ¶ms, SharedStorage &shared_storage) {
int threadblock_num = (params.args.extent.column() + ThreadblockShape::kM - 1) / ThreadblockShape::kM;
int block_n = blockIdx.x * blockDim.x;
int thread_n = threadIdx.x;
int idx_n = block_n + thread_n;
if (idx_n >= params.args.extent.row()) {
return;
}
using ConvertVarianceOutput = cutlass::NumericConverter<ElementVariance, ElementLayernormCompute>;
using ConvertMeanOutput = cutlass::NumericConverter<ElementMean, ElementLayernormCompute>;
using ConvertVariance = cutlass::NumericConverter<ElementLayernormCompute, ElementVariance>;
using ConvertMean = cutlass::NumericConverter<ElementLayernormCompute, ElementMean>;
using ConvertShiftK = cutlass::NumericConverter<ElementLayernormCompute, ElementOutput>;
ConvertVariance convert_variance;
ConvertMean convert_mean;
ConvertVarianceOutput convert_variance_output;
ConvertMeanOutput convert_mean_output;
ElementVariance *access_square = params.args.ref_Variance.data() + idx_n;
ElementMean *access_mean = params.args.ref_Mean.data() + idx_n;
ElementVariance *access_square_bak = access_square;
ElementMean *access_mean_bak = access_mean;
ElementLayernormCompute frag_square_sum = ElementLayernormCompute(0);
ElementLayernormCompute frag_element_sum = ElementLayernormCompute(0);
ElementVariance fetch_square;
ElementMean fetch_mean;
CUTLASS_PRAGMA_UNROLL
for (int idx_m = 0; idx_m < threadblock_num; idx_m++) {
arch::global_load<ElementVariance, sizeof(ElementVariance)>(fetch_square, access_square, true);
arch::global_load<ElementMean, sizeof(ElementMean)>(fetch_mean, access_mean, true);
frag_element_sum += convert_mean(fetch_mean);
frag_square_sum += convert_variance(fetch_square);
access_square += params.args.extent.row();
access_mean += params.args.extent.row();
}
ElementLayernormCompute mean = frag_element_sum;
ElementLayernormCompute square_mean = frag_square_sum;
ElementLayernormCompute variance;
if (kIsShiftedVariance && params.args.ptr_Shifted_K != nullptr) {
ElementOutput *access_shift_k = params.args.ptr_Shifted_K + idx_n;
ElementOutput fetch_shift_k;
ConvertShiftK convert_shift_k;
arch::global_load<ElementOutput, sizeof(ElementOutput)>(fetch_shift_k, access_shift_k, true);
ElementLayernormCompute shifted_mean = mean - convert_shift_k(fetch_shift_k);
variance = cutlass::constants::one<ElementLayernormCompute>() / cutlass::fast_sqrt(square_mean - shifted_mean * shifted_mean + ElementLayernormCompute(1e-6));
}else{
variance = cutlass::constants::one<ElementLayernormCompute>() / cutlass::fast_sqrt(square_mean - mean * mean + ElementLayernormCompute(1e-6));
}
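    // Store the inverse standard deviation in place of the accumulated square sum, and the
    // pre-scaled negative mean (-E[x] * inv_std) in place of the accumulated sum, so downstream
    // consumers can apply the normalization as a single multiply-add per element.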
mean = -mean * variance;
access_square = access_square_bak;
access_mean = access_mean_bak;
access_square[0] = convert_variance_output(variance);
access_mean[0] = convert_mean_output(mean);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ThreadblockShape_,
int ThreadCount,
typename OutputTileIterator_,
typename AccumulatorTile_,
typename ElementAccumulator_,
typename ElementVariance_,
typename ElementMean_,
typename ElementLayernormCompute_,
typename ElementwiseFunctor_,
bool IsShiftedVariance_ = false
>
class EpilogueVisitorLayerNorm {
public:
using ElementVariance = ElementVariance_;
using ElementMean = ElementMean_;
using ElementLayernormCompute = ElementLayernormCompute_;
using AccumulatorTile = AccumulatorTile_;
using ThreadblockShape = ThreadblockShape_;
static int const kThreadCount = ThreadCount;
using OutputTileIterator = OutputTileIterator_;
using ElementwiseFunctor = ElementwiseFunctor_;
static int const kIterations = OutputTileIterator::kIterations;
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
static int const kRowIterations = OutputTileIterator::ThreadMap::Iterations::kRow;
static int const kThreads = OutputTileIterator::ThreadMap::kThreads;
static bool const kIsShiftedVariance = IsShiftedVariance_;
using ElementOutput = typename OutputTileIterator::Element;
static int const kDeltaRow = OutputTileIterator::ThreadMap::Delta::kRow;
/// Array type used in Shift-K Layernorm
static int const kRowAccessCount = kIterations * kRowIterations;
using ConvertedShiftFragment = Array<ElementLayernormCompute, kRowAccessCount>;
// Conducts manual transpose externally (already supported) for column major
using LayoutOutput = cutlass::layout::RowMajor;
using ElementAccumulator = ElementAccumulator_;
using AccumulatorFragment = Array<ElementAccumulator, kElementsPerAccess>;
using LayernormFragment = Array<ElementLayernormCompute, kElementsPerAccess>;
using OutputVector = Array<ElementOutput, kElementsPerAccess>;
using TensorRefD = TensorRef<ElementOutput, LayoutOutput>;
static int const kThreadsPerRow = OutputTileIterator::ThreadMap::Detail::RowArrangement::Detail::kShapeWidth;
static int const kThreadsInColumn = kThreads / kThreadsPerRow;
static int const kHalfThreadsPerRow = (kThreadsPerRow >> 1);
/// Argument structure
struct Arguments {
typename ElementwiseFunctor::Params elementwise;
TensorRefD ref_C;
TensorRefD ref_D;
ElementVariance *ptr_Variance;
ElementMean *ptr_Mean;
ElementOutput *ptr_Shifted_K;
//
// Methods
//
Arguments():
ptr_Variance(nullptr),
ptr_Mean(nullptr),
ptr_Shifted_K(nullptr)
{
}
Arguments(
typename ElementwiseFunctor::Params elementwise_,
TensorRefD ref_C_,
TensorRefD ref_D_,
ElementVariance *ptr_Variance,
ElementMean *ptr_Mean_,
ElementOutput *ptr_Shifted_K_ = nullptr
):
elementwise(elementwise_),
ref_C(ref_C_),
ref_D(ref_D_),
ptr_Variance(ptr_Variance),
ptr_Mean(ptr_Mean_),
ptr_Shifted_K(ptr_Shifted_K_)
{
}
};
struct Params {
typename ElementwiseFunctor::Params elementwise;
typename OutputTileIterator::Params params_C;
typename OutputTileIterator::Params params_D;
typename OutputTileIterator::Element *ptr_C;
typename OutputTileIterator::Element *ptr_D;
ElementVariance *ptr_Variance;
ElementMean *ptr_Mean;
ElementOutput *ptr_Shifted_K;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
ptr_D(nullptr),
ptr_Variance(nullptr),
ptr_Mean(nullptr)
{
}
CUTLASS_HOST_DEVICE
Params(Arguments const &args):
elementwise(args.elementwise),
params_C(args.ref_C.layout()),
params_D(args.ref_D.layout()),
ptr_C(args.ref_C.data()),
ptr_D(args.ref_D.data()),
ptr_Variance(args.ptr_Variance),
ptr_Mean(args.ptr_Mean),
ptr_Shifted_K(args.ptr_Shifted_K)
{
}
};
/// Shared storage
struct SharedStorage {
};
private:
Params const & params_;
SharedStorage & shared_storage_;
MatrixCoord extent_;
ElementwiseFunctor elementwise_;
OutputTileIterator iterator_C_;
OutputTileIterator iterator_D_;
typename OutputTileIterator::Fragment fragment_C_;
typename OutputTileIterator::Fragment fragment_D_;
ElementAccumulator alpha_;
ElementAccumulator beta_;
ConvertedShiftFragment shift_k_frag_;
ElementLayernormCompute accum_sum_square_;
ElementLayernormCompute accum_sum_element_;
MatrixCoord thread_offset_;
public:
CUTLASS_DEVICE
EpilogueVisitorLayerNorm(
Params const ¶ms, ///< Parameters routed to the epilogue
SharedStorage &shared_storage, ///< Shared storage needed by the functors here
MatrixCoord const &problem_size0, ///< Problem size of the output
int thread_idx, ///< Thread index within the threadblock
int warp_idx, ///< Warp index within the threadblock
int lane_idx, ///< Lane index within the warp
MatrixCoord const &threadblock_offset = MatrixCoord(0, 0)
):
params_(params),
shared_storage_(shared_storage),
extent_(problem_size0),
elementwise_(params.elementwise),
iterator_C_(params.params_C, params.ptr_C, problem_size0, thread_idx, threadblock_offset),
iterator_D_(params.params_D, params.ptr_D, problem_size0, thread_idx, threadblock_offset)
{
alpha_ = (params.elementwise.alpha_ptr ? *params.elementwise.alpha_ptr : params.elementwise.alpha);
beta_ = (params.elementwise.beta_ptr ? *params.elementwise.beta_ptr : params.elementwise.beta);
if (beta_ == ElementAccumulator()) {
iterator_C_.clear_mask();
}
}
/// Helper to indicate split-K behavior
CUTLASS_DEVICE
void set_k_partition(
int split_k_index, ///< Index of this threadblock within split-K partitioned scheme
int split_k_slices) { ///< Total number of split-K slices
}
/// Called to set the batch index
CUTLASS_DEVICE
void set_batch_index(int batch_idx) {
}
/// Called at the start of the epilogue just before iterating over accumulator slices
CUTLASS_DEVICE
void begin_epilogue() {
// If shift-K feature is enabled, we load shift-k fragment
// at the very beginning of an epilogue
if (kIsShiftedVariance && params_.ptr_Shifted_K != nullptr) {
shift_k_frag_.clear();
int thread_offset_row_base = iterator_D_.thread_start_row();
CUTLASS_PRAGMA_UNROLL
for (int iter_idx = 0; iter_idx < kIterations; ++iter_idx) {
int step_offset = iter_idx * OutputTileIterator::Shape::kRow;
CUTLASS_PRAGMA_UNROLL
for (int rid = 0; rid < kRowIterations; ++rid) {
int row_step_offset = rid * kDeltaRow;
int row_offset = thread_offset_row_base + step_offset + row_step_offset;
bool is_load = (row_offset < extent_.row());
shift_k_frag_[iter_idx * kRowIterations + rid] = load_shift_k_(row_offset, is_load);
}
}
}
}
/// Called at the start of one step before starting accumulator exchange
CUTLASS_DEVICE
void begin_step(int step_idx) {
fragment_D_.clear();
if (elementwise_.kScale != cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling) {
fragment_C_.clear();
iterator_C_.load(fragment_C_);
++iterator_C_;
}
}
/// Called at the start of a row
CUTLASS_DEVICE
void begin_row(int row_idx) {
}
/// Called after accumulators have been exchanged for each accumulator vector
CUTLASS_DEVICE
void visit(
int iter_idx,
int row_idx,
int column_idx,
int frag_idx,
AccumulatorFragment const &accum) {
using Mul = cutlass::multiplies<ElementLayernormCompute>;
using Minus = cutlass::minus<ElementLayernormCompute>;
using Exp = cutlass::fast_exp_op<ElementLayernormCompute>;
[[maybe_unused]] Minus minus;
[[maybe_unused]] Mul mul;
[[maybe_unused]] Exp exponential;
LayernormFragment result;
thread_offset_ =
iterator_D_.thread_start() +
OutputTileIterator::ThreadMap::iteration_offset(frag_idx);
NumericArrayConverter<ElementLayernormCompute, ElementOutput, kElementsPerAccess> source_converter;
OutputVector &source_vector = reinterpret_cast<OutputVector *>(&fragment_C_)[frag_idx];
bool column_guard = (thread_offset_.column() < extent_.column());
if (elementwise_.kScale == cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling) {
result = source_converter(elementwise_(accum));
}else{
result = source_converter(elementwise_(accum, source_vector));
}
ElementLayernormCompute inv_scalar = cutlass::constants::one<ElementLayernormCompute>() / ElementLayernormCompute(extent_.column());
// Fragment is cleared for non-reachable columns so no need to check against column guard
accum_sum_element_ = element_sum_accumulator_(result);
    // The square sum is different: with shift-k enabled, non-reachable columns would otherwise
    // contribute spurious k^2 terms, so the column guard is required before accumulating it.
if (column_guard) {
accum_sum_square_ = (kIsShiftedVariance) ? \
square_sum_accumulator_(result, shift_k_frag_[iter_idx * kRowIterations + row_idx]) : \
square_sum_accumulator_(result);
}
else {
accum_sum_square_ = ElementLayernormCompute(0);
}
accum_sum_element_ *= inv_scalar;
accum_sum_square_ *= inv_scalar;
// After performing the in-thread reduction, we then perform cross-thread / in-warp reduction
CUTLASS_PRAGMA_UNROLL
for (int i = kHalfThreadsPerRow; i > 0; i >>= 1) {
accum_sum_element_ += __shfl_xor_sync(0xFFFFFFFF, accum_sum_element_, i);
accum_sum_square_ += __shfl_xor_sync(0xFFFFFFFF, accum_sum_square_, i);
}
// Convert to the output
NumericArrayConverter<ElementOutput, ElementLayernormCompute, kElementsPerAccess> output_converter;
OutputVector &output = reinterpret_cast<OutputVector *>(&fragment_D_)[frag_idx];
output = output_converter(result);
}
  /// Called at the end of a row
CUTLASS_DEVICE
void end_row(int row_idx) {
using ConvertVarianceOutput = cutlass::NumericConverter<ElementVariance, ElementLayernormCompute>;
using ConvertMeanOutput = cutlass::NumericConverter<ElementMean, ElementLayernormCompute>;
ConvertVarianceOutput convert_variance_output;
ConvertMeanOutput convert_mean_output;
bool is_write_thread = (thread_offset_.row() < extent_.row() && (threadIdx.x % kThreadsPerRow) == 0);
int row_offset = thread_offset_.row() + blockIdx.y * extent_.row();
ElementVariance *curr_ptr_sum_square = params_.ptr_Variance + row_offset;
ElementMean *curr_ptr_element_sum = params_.ptr_Mean + row_offset;
arch::global_store<ElementVariance, sizeof(ElementVariance)>(
convert_variance_output(accum_sum_square_),
(void *)curr_ptr_sum_square,
is_write_thread);
arch::global_store<ElementMean, sizeof(ElementMean)>(
convert_mean_output(accum_sum_element_),
(void *)curr_ptr_element_sum,
is_write_thread);
}
/// Called after all accumulator elements have been visited
CUTLASS_DEVICE
void end_step(int step_idx) {
iterator_D_.store(fragment_D_);
++iterator_D_;
}
/// Called after all steps have been completed
CUTLASS_DEVICE
void end_epilogue() {
}
private:
CUTLASS_DEVICE
ElementLayernormCompute load_shift_k_(int row_offset, bool is_load) {
using ConvertShiftK = cutlass::NumericConverter<ElementLayernormCompute, ElementOutput>;
ConvertShiftK convert_shift_k;
ElementOutput shift_k_val;
// Computes the address to load shift_k element
ElementOutput *curr_ptr_shift_k = params_.ptr_Shifted_K + row_offset;
// Conditionally loads from global memory
arch::global_load<ElementOutput, sizeof(ElementOutput)>(shift_k_val, (void *)curr_ptr_shift_k, is_load);
// Converts data type to return
ElementLayernormCompute converted_shift_k_val = convert_shift_k(shift_k_val);
return converted_shift_k_val;
}
CUTLASS_DEVICE
ElementLayernormCompute square_sum_accumulator_(LayernormFragment const &accum) {
ElementLayernormCompute sum_ = ElementLayernormCompute(0);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < LayernormFragment::kElements; ++i) {
auto accum_ = accum[i];
sum_ += accum_ * accum_;
}
return sum_;
}
CUTLASS_DEVICE
ElementLayernormCompute square_sum_accumulator_(LayernormFragment const &accum, ElementLayernormCompute shift_k_val) {
ElementLayernormCompute sum_ = ElementLayernormCompute(0);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < LayernormFragment::kElements; ++i) {
auto accum_ = accum[i] - shift_k_val;
sum_ += accum_ * accum_;
}
return sum_;
}
CUTLASS_DEVICE
ElementLayernormCompute element_sum_accumulator_(LayernormFragment const &accum) {
ElementLayernormCompute sum_ = ElementLayernormCompute(0);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < LayernormFragment::kElements; ++i) {
sum_ += accum[i];
}
return sum_;
}
};
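
// Rough callback sequence implied by the methods above (see epilogue_with_visitor.h for the
// authoritative order): begin_epilogue() once, then per output-tile step begin_step() ->
// begin_row()/visit() per accumulator fragment -> end_row() -> end_step(), and finally
// end_epilogue().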
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
/////////////////////////////////////////////////////////////////////////////////////////////////
///
template <
typename ElementInputA0_,
typename LayoutInputA0_,
typename ElementInputB0_,
typename LayoutInputB0_,
typename ElementOutput_,
typename LayoutOutput_,
typename ElementCompute_,
typename EpilogueFunctorOp_,
typename ThreadblockShape_,
typename WarpShape_,
typename InstructionShape_,
int Stages0,
int Stages1,
bool IsShiftedVariance_ = false
>
class GemmLayernorm {
public:
///////////////////////////////////////////////////////////////////////////////////////////////
//
// Type definitions
//
static bool const kInternalTranspose = cutlass::platform::is_same<LayoutOutput_, cutlass::layout::ColumnMajor>::value;
static bool const kIsShiftedVariance = IsShiftedVariance_;
  // This is a mandatory layout.
using LayoutInputScaleBias = cutlass::layout::RowMajor;
// These are mandatory data types.
using ElementLayernormCompute = float;
using ElementInputScaleBias = cutlass::half_t;
// These are mandatory params required by mainloop fusion
using OperatorClass = cutlass::arch::OpClassTensorOp;
using ArchTag = cutlass::arch::Sm80;
// These are mandatory layouts and data types
  // that are inherited from pre-defined params
using LayoutSumSqr = LayoutInputScaleBias;
using LayoutSum = LayoutInputScaleBias;
using ElementMean = ElementInputScaleBias;
using ElementVariance = ElementInputScaleBias;
///////////////////////////////////////////////////////////////////////////////////////////////
using LayoutInputA0 = LayoutInputA0_;
using LayoutInputB0 = LayoutInputB0_;
using LayoutInputA1 = LayoutOutput_;
using LayoutInputB1 = LayoutOutput_;
using LayoutOutputC0 = LayoutOutput_;
using LayoutOutputC1 = LayoutOutput_;
using ElementInputA0 = ElementInputA0_;
using ElementInputB0 = ElementInputB0_;
using ElementOutputC0 = ElementOutput_;
using ElementCompute = ElementCompute_;
using ElementInputB1 = ElementInputB0_;
using ElementInputA1 = ElementOutputC0;
using ElementOutputC1 = ElementOutputC0;
using EpilogueFunctorOp = EpilogueFunctorOp_;
using TensorRefA = TensorRef<ElementInputA0, LayoutInputA0>;
using TensorRefB = TensorRef<ElementInputB0, LayoutInputB0>;
using TensorRefC = TensorRef<ElementOutputC0, LayoutOutputC0>;
using TensorVariance = TensorRef<ElementVariance, LayoutSumSqr>;
using TensorMean = TensorRef<ElementMean, LayoutSum>;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
static int const kStages0 = Stages0;
static int const kStages1 = Stages1;
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
///////////////////////////////////////////////////////////////////////////////////////////////
using MapArguments = cutlass::gemm::kernel::detail::MapArguments<
ElementInputA0,
LayoutInputA0,
cutlass::ComplexTransform::kNone,
128 / cutlass::sizeof_bits<ElementInputA0>::value,
ElementInputB0,
LayoutInputB0,
cutlass::ComplexTransform::kNone,
128 / cutlass::sizeof_bits<ElementInputB0>::value,
LayoutOutputC0,
kInternalTranspose
>;
using DefaultGemmKernel = typename cutlass::gemm::kernel::DefaultGemm<
typename MapArguments::ElementA,
typename MapArguments::LayoutA,
MapArguments::kAlignmentA,
typename MapArguments::ElementB,
typename MapArguments::LayoutB,
MapArguments::kAlignmentB,
ElementOutputC0,
typename MapArguments::LayoutC,
ElementCompute,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueFunctorOp,
SwizzleThreadBlock,
kStages0,
true,
typename cutlass::gemm::device::DefaultGemmConfiguration<
OperatorClass, ArchTag, ElementInputA0, ElementInputB0, ElementOutputC0, ElementCompute>::Operator,
cutlass::gemm::SharedMemoryClearOption::kNone
>::GemmKernel;
///////////////////////////////////////////////////////////////////////////////////////////////
// Epilogue visitor
using EpilogueVisitor = kernel::EpilogueVisitorLayerNorm<
ThreadblockShape,
DefaultGemmKernel::kThreadCount,
typename DefaultGemmKernel::Epilogue::OutputTileIterator,
typename DefaultGemmKernel::Epilogue::AccumulatorFragmentIterator::AccumulatorTile,
ElementCompute,
ElementVariance,
ElementMean,
ElementLayernormCompute,
EpilogueFunctorOp,
kIsShiftedVariance
>;
/// Epilogue
using Epilogue = typename cutlass::epilogue::threadblock::EpilogueWithVisitorFromExistingEpilogue<
EpilogueVisitor,
typename DefaultGemmKernel::Epilogue
>::Epilogue;
// GEMM
using GemmEpilogueFusion = gemm::kernel::GemmWithEpilogueVisitor<
typename DefaultGemmKernel::Mma,
Epilogue,
SwizzleThreadBlock
>;
using ApplyFinalReductionKernel = kernel::ApplyFinalReduction<
ElementVariance,
ElementMean,
ElementLayernormCompute,
ElementOutputC0,
ThreadblockShape,
kIsShiftedVariance
>;
using GemmMainloopFusion = typename cutlass::gemm::device::GemmLayernormMainloopFusion<
ElementInputA1, LayoutInputA1,
ElementInputB1, LayoutInputB1,
ElementInputScaleBias, LayoutInputScaleBias,
ElementOutputC1, LayoutOutputC1,
ElementCompute,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueFunctorOp,
SwizzleThreadBlock,
kStages1
>;
public:
/// Arguments class
struct Arguments {
typename GemmEpilogueFusion::Arguments gemm0;
typename GemmMainloopFusion::Arguments gemm1;
typename ApplyFinalReductionKernel::Arguments reduction;
cutlass::gemm::GemmCoord extend;
//
// Methods
//
Arguments() { }
Arguments(
cutlass::gemm::GemmCoord problem_size0,
cutlass::gemm::GemmCoord problem_size1,
ElementInputA0 * ptr_A,
ElementInputB0 * ptr_B,
ElementOutputC0 * ptr_C,
ElementOutputC0 * ptr_D,
ElementOutputC0 * ptr_E,
ElementOutputC0 * ptr_O,
int64_t ldm_A,
int64_t ldm_B,
int64_t ldm_C,
int64_t ldm_D,
int64_t ldm_E,
int64_t ldm_O,
typename EpilogueFunctorOp::Params linear_scaling,
TensorVariance ref_Variance_,
TensorMean ref_Mean_,
TensorVariance ref_Gamma_,
TensorMean ref_Beta_,
ElementOutputC0 *ptr_Shifted_K = nullptr
):
gemm0(
cutlass::gemm::GemmUniversalMode::kGemm,
{kInternalTranspose ? problem_size0.n() : problem_size0.m(),\
kInternalTranspose ? problem_size0.m() : problem_size0.n(),\
problem_size0.k()},
{kInternalTranspose ? ptr_B : ptr_A, \
kInternalTranspose ? ldm_B : ldm_A},
{kInternalTranspose ? ptr_A : ptr_B, \
kInternalTranspose ? ldm_A : ldm_B},
typename EpilogueVisitor::Arguments(
linear_scaling,
{ptr_C, ldm_C},
{ptr_D, ldm_D},
ref_Variance_.data(),
ref_Mean_.data(),
ptr_Shifted_K
)
),
reduction(
MatrixCoord(kInternalTranspose ? problem_size0.n() : problem_size0.m(),\
kInternalTranspose ? problem_size0.m() : problem_size0.n()),
ref_Variance_,
ref_Mean_,
ptr_Shifted_K
),
gemm1(
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size1,
1,
linear_scaling,
kInternalTranspose ? ptr_E : ptr_D,
kInternalTranspose ? ptr_D : ptr_E,
ref_Variance_.data(),
ref_Mean_.data(),
ref_Gamma_.data(),
ref_Beta_.data(),
ptr_O,
ptr_O,
problem_size1.m() * problem_size1.k(),
problem_size1.n() * problem_size1.k(),
problem_size1.n(),
problem_size1.n(),
problem_size1.k(),
problem_size1.k(),
problem_size1.m() * problem_size1.n(),
problem_size1.m() * problem_size1.n(),
kInternalTranspose ? ldm_E : ldm_D,
kInternalTranspose ? ldm_D : ldm_D,
ref_Variance_.layout().stride(0),
ref_Mean_.layout().stride(0),
ref_Gamma_.layout().stride(0),
ref_Beta_.layout().stride(0),
ldm_O,
ldm_O
),
extend(problem_size0)
{
}
};
struct Params {
typename GemmEpilogueFusion::Params gemm0;
typename ApplyFinalReductionKernel::Params reduction;
MatrixCoord extend;
//
// Methods
//
Params() { }
Params(Arguments const &args):
gemm0(args.gemm0),
reduction(args.reduction),
extend(MatrixCoord(args.extend.m(), args.extend.n()))
{
}
};
public:
// Gemm
//
// Methods
//
private:
Params params_;
GemmMainloopFusion gemm_fusion_op;
public:
/// Ctor
GemmLayernorm() {
}
/// Initialize
Status initialize(Arguments const &args) {
params_ = Params(args);
cutlass::Status status;
size_t workspace_size = gemm_fusion_op.get_workspace_size(args.gemm1);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
status = gemm_fusion_op.can_implement(args.gemm1);
CUTLASS_CHECK(status);
status = gemm_fusion_op.initialize(args.gemm1, workspace.get());
CUTLASS_CHECK(status);
return cutlass::Status::kSuccess;
}
/// Run
Status run(cudaStream_t stream) {
//
// Launch the GEMM + layernorm kernel
//
dim3 gemm_grid = SwizzleThreadBlock().get_grid_shape(params_.gemm0.grid_tiled_shape);
dim3 gemm_block(GemmEpilogueFusion::kThreadCount, 1, 1);
int gemm_smem_size = int(sizeof(typename GemmEpilogueFusion::SharedStorage));
cutlass::Kernel<GemmEpilogueFusion><<<gemm_grid, gemm_block, gemm_smem_size, stream>>>(params_.gemm0);
cudaError_t result = cudaGetLastError();
if (result != cudaSuccess) {
return cutlass::Status::kErrorInternal;
}
//
// Launch the ApplyFinalReductionKernel
//
// always performs reduction from leading dimension
int leading_dim_0 = kInternalTranspose ? params_.extend.row() : params_.extend.column();
int leading_dim_1 = kInternalTranspose ? params_.extend.column() : params_.extend.row();
int thread_per_block = 128;
int block_per_row = (leading_dim_1 + thread_per_block - 1) / thread_per_block;
if (block_per_row < 4) {
thread_per_block = 32;
block_per_row = (leading_dim_1 + thread_per_block - 1) / thread_per_block;
}
dim3 final_reduction_block(thread_per_block);
dim3 final_reduction_grid(block_per_row);
Kernel<ApplyFinalReductionKernel><<<
final_reduction_grid, final_reduction_block, sizeof(typename ApplyFinalReductionKernel::SharedStorage), stream
>>>(params_.reduction);
result = cudaGetLastError();
if (result != cudaSuccess) {
return cutlass::Status::kErrorInternal;
}
//
// Launch the GEMM + mainloop fusion kernel
//
cutlass::Status status = gemm_fusion_op();
CUTLASS_CHECK(status);
return cutlass::Status::kSuccess;
}
/// Function call operator
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
};
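
// A minimal usage sketch (hypothetical element types, tile shapes, and epilogue functor shown
// only for orientation; the caller supplies its own definitions for these):
//
//   using GemmLayernormFused = cutlass::GemmLayernorm<
//       cutlass::half_t, cutlass::layout::RowMajor,      // A0
//       cutlass::half_t, cutlass::layout::ColumnMajor,   // B0
//       cutlass::half_t, cutlass::layout::RowMajor,      // output
//       float,                                           // compute
//       EpilogueFunctorOp, ThreadblockShape, WarpShape, InstructionShape,
//       /*Stages0=*/4, /*Stages1=*/4>;
//
//   GemmLayernormFused op;
//   typename GemmLayernormFused::Arguments args(/* problem sizes, pointers, leading dimensions,
//                                                  epilogue params, variance/mean/gamma/beta refs */);
//   op.initialize(args);
//   op(/* stream = */ nullptr);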
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/37_gemm_layernorm_gemm_fusion/gemm_with_layernorm.h/0 | {
"file_path": "examples/37_gemm_layernorm_gemm_fusion/gemm_with_layernorm.h",
"repo_id": "examples",
"token_count": 12997
} | 3 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to run group convolution kernels using functions and data structures
  provided by CUTLASS using tensor cores, which we run on an NVIDIA Ampere GPU.
  There are 2 group conv modes:
  1. cutlass::conv::GroupMode::kSingleGroup
     This mode is for large K problem sizes: k_per_group (K/groups) is equal to or larger than
     threadblock_tile_N. One or multiple threadblocks calculate data of one group.
  2. cutlass::conv::GroupMode::kMultipleGroup
     This mode is for small K problem sizes: k_per_group (K/groups) is smaller than threadblock_tile_N.
     One threadblock will calculate data from more than one group.
  The function profile_convolution_selecter() shows how to choose a kernel with the appropriate group
  mode according to the problem size and the threadblock_tile size.
*/
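
// A sketch of the selection logic described above (the actual dispatch is performed by
// profile_convolution_selecter() later in this example; ThreadblockShape refers to the
// 64x64x64 threadblock tile declared below):
//
//   int k_per_group = filter_size.n() / groups;          // K / groups
//   if (k_per_group >= ThreadblockShape::kN) {
//     // kSingleGroup: one or more threadblocks compute data of a single group
//   } else {
//     // kMultipleGroup: one threadblock computes data from more than one group
//   }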
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_group_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 3;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
    128 / cutlass::sizeof_bits<ElementOutput>::value,     // The number of elements per vectorized
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
// Analytic kernel and operation for single group problem size
using AnalyticSingleGroupKernel = typename cutlass::conv::kernel::DefaultConv2dGroupFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::GroupMode::kSingleGroup,
cutlass::conv::IteratorAlgorithm::kAnalytic
>::Kernel;
using AnalyticSingleGroupOperation = cutlass::conv::device::ImplicitGemmConvolution<AnalyticSingleGroupKernel>;
// Analytic kernel and operation for multiple group problem size
using AnalyticMultipleGroupKernel = typename cutlass::conv::kernel::DefaultConv2dGroupFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::GroupMode::kMultipleGroup,
cutlass::conv::IteratorAlgorithm::kAnalytic
>::Kernel;
using AnalyticMultipleGroupOperation = cutlass::conv::device::ImplicitGemmConvolution<AnalyticMultipleGroupKernel>;
// Optimized kernel and operation for single group problem size
using OptimizedSingleGroupKernel = typename cutlass::conv::kernel::DefaultConv2dGroupFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::GroupMode::kSingleGroup,
cutlass::conv::IteratorAlgorithm::kOptimized
>::Kernel;
using OptimizedSingleGroupOperation = cutlass::conv::device::ImplicitGemmConvolution<OptimizedSingleGroupKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
int groups;
bool reference_check;
bool measure_performance;
int iterations;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool optimized;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
groups(1),
reference_check(false),
measure_performance(false),
iterations(20),
alpha(1),
beta(0),
optimized(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 8 elements.
//
int const kAlignment = 8;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size) {
this->input_size = input_size;
this->filter_size = filter_size;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("optimized")) {
optimized = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
cmd.get_cmd_line_argument("g", groups);
filter_size.c() = input_size.c() / groups;
cmd.get_cmd_line_argument("u", conv_stride.row());
cmd.get_cmd_line_argument("v", conv_stride.column());
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "42_ampere_tensorop_group_conv example\n\n"
<< " This example uses Ampere's Tensor Core operators on F16 data types to compute\n"
<< " forward grouped convolution on tensors of layout NHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n=<int> Input tensor extent N\n"
<< " --h=<int> Input tensor extent H\n"
<< " --w=<int> Input tensor extent W\n"
<< " --c=<int> Input tensor extent C\n"
<< " --k=<int> Filter extent K\n"
<< " --r=<int> Filter extent R\n"
<< " --s=<int> Filter extent S\n\n"
<< " --g=<int> Conv groups G\n\n"
<< " --u=<int> Conv stride_h\n\n"
<< " --v=<int> Conv stride_w\n\n"
<< " --alpha=<float> Epilogue scalar alpha\n"
<< " --beta=<float> Epilogue scalar beta\n\n"
<< " --ref-check If set (true), reference check is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --optimized If set (true), use optimized kernel, otherwise use analytic kernel.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --tag=<string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/42_ampere_tensorop_group_conv/42_ampere_tensorop_group_conv --n=4 --h=16 --w=16 --c=256 --k=128 --r=3 --s=3 --g=8 --ref-check\n\n"
<< "$ ./examples/42_ampere_tensorop_group_conv/42_ampere_tensorop_group_conv --n=4 --h=16 --w=16 --c=256 --k=128 --r=3 --s=3 --g=2 --ref-check\n\n"
<< "$ ./examples/42_ampere_tensorop_group_conv/42_ampere_tensorop_group_conv --n=4 --h=16 --w=16 --c=256 --k=128 --r=3 --s=3 --g=2 --ref-check --optimized\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS
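    // For example, with the first command line shown in print_usage() (n=4, h=w=16, c=256,
    // k=128, r=s=3, g=8, unit stride, padding 1): NPQK = 4*16*16*128 and CRS = (256/8)*3*3,
    // since filter_size.c() already stores C/groups.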
int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,G,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< options.groups << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
template <typename Conv2dOperation>
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size);
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(7),
ElementInputA(-8),
0);
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(7),
ElementInputB(-8),
0);
// Fill tensor C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(7),
ElementOutput(-8),
0);
// Fill tensor D on host with zeros
cutlass::reference::host::TensorFill(
tensor_d.host_view());
// Fill tensor D for reference on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
  // Split K dimension into 1 partition
int split_k_slices = 1;
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices,
options.groups
);
// Construct Conv2dOperation::Argument structure with conv2d
// problem size, data pointers, and epilogue values
typename Conv2dOperation::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_d.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
Conv2dOperation implicit_gemm_op;
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on device...\n";
// Compute with reference implementation
cutlass::reference::device::Conv2dFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>
>(
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_ref_d.device_ref(),
options.alpha,
options.beta
);
tensor_ref_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
tensor_d.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
} else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
} else {
result.reference_check = cutlass::Status::kInvalid;
}
//
// Performance measurement
//
if (options.measure_performance) {
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Result profile_convolution_selecter(Options const &options) {
int k_per_group = options.filter_size.n() / options.groups;
// In group conv, if k_per_group < threadblock_N, one Threadblock will calculate multiple groups
if (k_per_group < ThreadblockShape::kN) { // MultipleGroup mode
if (options.optimized) {
std::cerr << "Invalid problem: optimized group conv kernel doesn't support MultipleGroup (one CTA calculate multiple groups) mode" << std::endl;
exit(-1);
} else {
std::cout << "Select AnalyticMultipleGroupOperation\n";
return profile_convolution<AnalyticMultipleGroupOperation>(options);
}
} else { // SingleGroup mode
if (options.optimized) {
std::cout << "Select OptimizedSingleGroupOperation\n";
return profile_convolution<OptimizedSingleGroupOperation>(options);
} else {
std::cout << "Select AnalyticSingleGroupOperation\n";
return profile_convolution<AnalyticSingleGroupOperation>(options);
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major > 8 || (props.major == 8 && props.minor >= 0))) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution_selecter(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/42_ampere_tensorop_group_conv/ampere_tensorop_group_conv.cu/0 | {
"file_path": "examples/42_ampere_tensorop_group_conv/ampere_tensorop_group_conv.cu",
"repo_id": "examples",
"token_count": 8701
} | 4 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import gen_ir
import helper
import gen_threadblock as gen_tb
class gen_default_Gemm:
def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = "B2bGemm"
self.template_param = template_param
self.b2b_num = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_B2bMma(self, specialized_template_args):
code = "using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma<\n"
code += specialized_template_args
code += ">::ThreadblockB2bMma;\n"
# print(code)
return code
def gen_epilogue(self):
epilogue_code = ""
epilogue_code += helper.var_idx("static const int kPartitionsK", self.b2b_num - 1) + helper.var_idx(" = ThreadblockShape", self.b2b_num - 1) + helper.var_idx("::kK / WarpShape", self.b2b_num - 1) + "::kK;\n"
epilogue_code += "using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<\n"
epilogue_code += " " + helper.var_idx("ThreadblockShape", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("typename B2bMma::Operator", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("kPartitionsK", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("EpilogueOutputOp", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("EpilogueOutputOp", self.b2b_num - 1) + "::kCount\n"
epilogue_code += ">::Epilogue;\n"
epilogue_code += "using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle, SplitKSerial>;\n\n"
return epilogue_code
def gen_include_header(self):
code = '''
/* Auto Generated code - Do not edit.*/
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/layout/matrix.h\"
#include \"{cutlass_dir}cutlass/numeric_types.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/epilogue.h\"
#include \"{cutlass_dir}cutlass/epilogue/thread/linear_combination.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/gemm/kernel/gemm_pipelined.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm75.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm70.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm80.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_simt.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/threadblock_swizzle.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/default_epilogue_tensor_op.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/default_epilogue_simt.h\"
#include \"{cutlass_dir}cutlass/transform/threadblock/predicated_tile_iterator.h\"
#include \"../kernel/b2b_gemm.h\"
#include \"../threadblock/default_b2b_mma.h\"
'''.format(cutlass_dir=self.cutlass_deps_root)
return code
def gen_code(self):
gen_using = ''
# Generate default template struct
gen_code = gen_ir.gen_template_struct("Default" + self.gen_class_name, self.template_param,"", speicalized = None, set_default=False)
filter_list = []
filter_list.append(('Stages', 2))
filter_list.append(("OperatorClass", "arch::OpClassTensorOp"))
filter_list.append(("ArchTag", "arch::Sm75"))
for i in range(self.b2b_num):
filter_list.append((helper.var_idx("LayoutC", i), "layout::RowMajor"))
rtn_template_args, speicalized_template_args = gen_ir.filtered_param(self.template_param, filter_list, keep_= True)
B2bMma_code = self.gen_B2bMma(speicalized_template_args)
epilogue_and_rest_code = self.gen_epilogue()
gen_special_code = gen_ir.gen_template_struct("Default" + self.gen_class_name, rtn_template_args, B2bMma_code + epilogue_and_rest_code, speicalized = speicalized_template_args, set_default=False)
code = gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("kernel", gen_code + gen_special_code)))
return self.gen_include_header() + code
class gen_Kernel:
def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = "B2bGemm"
self.template_param = template_param
self.b2bnum = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_include_header(self):
code = '''
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/matrix_coord.h\"\n'''.format(cutlass_dir=self.cutlass_deps_root)
return code
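    # Emits the data members of the generated B2bGemm kernel's Params struct: per-GEMM
    # problem sizes, the tiled grid shape, iterator params/refs for A0, each B_i and C_i,
    # the final D ref, per-stage output-op params, the batch count, and gemm_k_iterations_0.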
def gen_Params(self):
gen_param = ""
for i in range(self.b2bnum):
gen_param += " " + helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + ";\n"
gen_param += " " + "cutlass::gemm::GemmCoord grid_tiled_shape;\n"
gen_param += " " + "typename B2bMma::IteratorA0::Params params_A0;\n"
gen_param += " " + "typename B2bMma::IteratorA0::TensorRef ref_A0;\n"
for i in range(self.b2bnum):
gen_param += " " + helper.var_idx("typename B2bMma::IteratorB", i) + helper.var_idx("::Params params_B", i) + ";\n"
gen_param += " " + helper.var_idx("typename B2bMma::IteratorB", i) + helper.var_idx("::TensorRef ref_B", i) + ";\n"
if i == self.b2bnum - 1:
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::Params params_C", i) + ";\n"
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_C", i) + ";\n"
else:
gen_param += " " + helper.var_idx("typename FusedAddBiasEpilogue", i) + helper.var_idx("::OutputTileIterator::Params params_C", i) + ";\n"
gen_param += " " + helper.var_idx("typename FusedAddBiasEpilogue", i) + helper.var_idx("::OutputTileIterator::TensorRef ref_C", i) + ";\n"
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::Params params_D", self.b2bnum - 1) + ";\n"
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_D", self.b2bnum - 1) + ";\n"
for i in range(self.b2bnum):
gen_param += " " + helper.var_idx("typename OutputOp", i) + helper.var_idx("::Params output_op_", i) + ";\n"
gen_param += " " + 'int batch_count' + ";\n"
gen_param += " " + 'int gemm_k_iterations_0' + ";\n"
return gen_param
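    # Emits the Params struct's default constructor plus the full constructor, which
    # initializes the iterator params from the tensor-ref layouts and derives
    # gemm_k_iterations_0 from problem_size_0 and the threadblock K extent.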
def gen_Memberfunc(self):
code_default = "\nCUTLASS_HOST_DEVICE\n"
code_default += "Params()"
code_default += " { } \n\n"
code_construct = "\nCUTLASS_HOST_DEVICE\n"
code_construct += "Params(\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("cutlass::gemm::GemmCoord const & problem_size_", i) + ",\n"
code_construct += " " + "cutlass::gemm::GemmCoord const & grid_tiled_shape,\n"
code_construct += " " + "typename B2bMma::IteratorA0::TensorRef ref_A0,\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("typename B2bMma::IteratorB", i) + helper.var_idx("::TensorRef ref_B", i) + ",\n"
if i == self.b2bnum - 1:
code_construct += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_C", i) + ",\n"
else:
code_construct += " " + helper.var_idx("typename FusedAddBiasEpilogue", i) + helper.var_idx("::OutputTileIterator::TensorRef ref_C", i) + ",\n"
code_construct += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_D", self.b2bnum - 1) + ",\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("typename OutputOp", i) + helper.var_idx("::Params output_op_", i) + helper.var_idx(" = typename OutputOp", i) + "::Params(),\n"
code_construct += " " + "int batch_count = 1\n"
code_construct += "):\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("problem_size_", i) + helper.var_idx("(problem_size_", i) + "),\n"
code_construct += " " + "grid_tiled_shape(grid_tiled_shape),\n"
code_construct += " " + "params_A0(ref_A0.layout()),\n"
code_construct += " " + "ref_A0(ref_A0),\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("params_B", i) + helper.var_idx("(ref_B", i) + ".layout()),\n"
code_construct += " " + helper.var_idx("ref_B", i) + helper.var_idx("(ref_B", i) + "),\n"
code_construct += " " + helper.var_idx("params_C", i) + helper.var_idx("(ref_C", i) + ".layout()),\n"
code_construct += " " + helper.var_idx("ref_C", i) + helper.var_idx("(ref_C", i) + "),\n"
code_construct += " " + helper.var_idx("params_D", self.b2bnum - 1) + helper.var_idx("(ref_D", self.b2bnum - 1) + ".layout()),\n"
code_construct += " " + helper.var_idx("ref_D", self.b2bnum - 1) + helper.var_idx("(ref_D", self.b2bnum - 1) + "),\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("output_op_", i) + helper.var_idx("(output_op_", i) + "), \n"
code_construct += " " + "batch_count(batch_count) {\n"
code_construct += " " + helper.var_idx("gemm_k_iterations_", 0) + helper.var_idx(" = (problem_size_", 0) + helper.var_idx(".k() + B2bMma::Shape", 0) + helper.var_idx("::kK - 1) / B2bMma::Shape", 0) + "::kK;\n"
code_construct += "}\n"
return code_default + code_construct
def gen_using(self):
code_using = ""
for i in range(self.b2bnum - 1):
code_using += " " + helper.var_idx("using OutputOp", i) + helper.var_idx(" = typename B2bMma::OutputOp", i) + ";\n"
code_using += " " + helper.var_idx("using OutputOp", self.b2bnum - 1) + " = typename Epilogue::OutputOp;\n"
for i in range(self.b2bnum - 1):
code_using += " " + helper.var_idx("using FusedAddBiasEpilogue", i) + helper.var_idx(" = typename B2bMma::FusedAddBiasEpilogue", i) +";\n"
code_using += " " + "using WarpCount0 = typename B2bMma::WarpCount0;\n"
code_using += " " + "static int const kThreadCount = 32 * WarpCount0::kCount;\n"
code_using += gen_ir.gen_struct("Params", self.gen_Params() + self.gen_Memberfunc())
code_using += "union SharedStorage {\n"
code_using += " " + "typename B2bMma::B2bMmaSharedStorage main_loop;\n"
code_using += " " + "typename Epilogue::SharedStorage epilogue;\n"
code_using += "};\n"
return code_using
def gen_can_implement(self):
gen_code = ""
return gen_code
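    # Emits the kernel's trivial constructor and its operator(): compute the threadblock
    # tile offset, build the A0/B_i/C_i tile iterators (with per-batch pointer offsets),
    # run the fused B2bMma mainloop, then run the final epilogue into iterator_D.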
def gen_operator_and_constr(self):
ctr_code = "CUTLASS_HOST_DEVICE\n"
ctr_code += self.gen_class_name + "() { } \n\n"
operator_code = "CUTLASS_DEVICE\n"
operator_code += "void operator()(Params const ¶ms, SharedStorage &shared_storage) {\n"
operator_code += " " + "ThreadblockSwizzle threadblock_swizzle;\n"
operator_code += " " + "cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.grid_tiled_shape);\n"
operator_code += " " + "int batch_idx = threadblock_tile_offset.k();\n"
operator_code += " " + "if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||\n"
operator_code += " " + "params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {\n"
operator_code += " " + " " + "return;\n"
operator_code += " " + "}\n"
operator_code += " " + "cutlass::MatrixCoord tb_offset_A0{\n"
operator_code += " " + " " + "threadblock_tile_offset.m() * B2bMma::Shape0::kM,\n"
operator_code += " " + " " + "0\n"
operator_code += " " + "};\n"
for i in range(self.b2bnum):
operator_code += " " + helper.var_idx("cutlass::MatrixCoord tb_offset_B", i) + "{\n"
operator_code += " " + " " + "0,\n"
operator_code += " " + " " + helper.var_idx("threadblock_tile_offset.n() * B2bMma::Shape", i) + "::kN\n"
operator_code += " " + "};\n"
operator_code += " " + "int thread_idx = threadIdx.x;\n\n"
operator_code += " " + "MatrixCoord threadblock_offset(\n"
operator_code += " " + " " + helper.var_idx("threadblock_tile_offset.m() * B2bMma::Shape", self.b2bnum - 1) + "::kM,\n"
operator_code += " " + " " + helper.var_idx("threadblock_tile_offset.n() * B2bMma::Shape", self.b2bnum - 1) + "::kN\n"
operator_code += " " + ");\n"
operator_code += " " + "typename B2bMma::IteratorA0 iterator_A0(\n"
operator_code += " " + " " + "params.params_A0,\n"
operator_code += " " + " " + "params.ref_A0.data(),\n"
operator_code += " " + " " + "params.problem_size_0.mk(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "tb_offset_A0);\n"
operator_code += " " + "iterator_A0.add_pointer_offset(batch_idx * params.problem_size_0.m() * params.problem_size_0.k());\n\n"
for i in range (self.b2bnum):
operator_code += " " + helper.var_idx("typename B2bMma::IteratorB", i ) + helper.var_idx(" iterator_B", i) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_B", i) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_B", i) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_", i) + ".kn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + helper.var_idx("tb_offset_B", i) + ");\n"
operator_code += " " + helper.var_idx("iterator_B", i) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", i) + helper.var_idx(".n() * params.problem_size_", i) + ".k());\n\n"
for i in range (self.b2bnum - 1):
operator_code += " " + helper.var_idx("typename FusedAddBiasEpilogue", i ) + helper.var_idx("::OutputTileIterator iterator_C", i) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_C", i) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_C", i) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_" , i) + ".mn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "threadblock_offset" + ");\n"
operator_code += " " + helper.var_idx("int ref_C", i) + helper.var_idx("_stride = params.ref_C", i) + ".stride()[0];\n"
operator_code += " " + helper.var_idx("iterator_C", i) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", i) + helper.var_idx(".n() * (ref_C", i) + helper.var_idx("_stride == 0 ? 1 : params.problem_size_", i) + ".m()));\n\n"
for i in range (self.b2bnum - 1):
operator_code += " " + helper.var_idx("FusedAddBiasEpilogue", i ) + helper.var_idx(" epilogue_", i ) + ";\n"
operator_code += " " + "int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);\n"
operator_code += " " + "int lane_idx = threadIdx.x % 32;\n"
for i in range (self.b2bnum - 1):
operator_code += " " + helper.var_idx("OutputOp", i) + helper.var_idx(" output_op_", i) + helper.var_idx("(params.output_op_", i) + ");\n"
operator_code += " " + "B2bMma b2bMma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);\n"
operator_code += " " + "typename B2bMma::FragmentC0 src_accum;\n"
operator_code += " " + helper.var_idx("typename B2bMma::FragmentC", self.b2bnum - 1)+ " accumulators;\n"
operator_code += " " + "src_accum.clear();\n"
operator_code += " " + "accumulators.clear();\n"
operator_code += " " + "b2bMma(params.gemm_k_iterations_0, accumulators, iterator_A0, "
for i in range(self.b2bnum):
operator_code += helper.var_idx("iterator_B", i) + ", "
operator_code += "src_accum"
if self.b2bnum != 1:
operator_code += ", "
for i in range(self.b2bnum - 1):
operator_code += helper.var_idx("output_op_", i) + ", "
for i in range(self.b2bnum - 1):
operator_code += helper.var_idx("epilogue_", i) + ", "
for i in range(self.b2bnum - 1):
final = ", "
if i == self.b2bnum - 2:
final =""
operator_code += helper.var_idx("iterator_C", i) + final
operator_code += ");\n"
operator_code += " " + helper.var_idx("OutputOp", self.b2bnum - 1) + helper.var_idx(" output_op_", self.b2bnum - 1) + helper.var_idx("(params.output_op_", self.b2bnum - 1) + ");\n"
operator_code += " " + "threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.grid_tiled_shape);\n"
operator_code += " " + helper.var_idx("typename Epilogue::OutputTileIterator iterator_C", self.b2bnum - 1) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_C", self.b2bnum - 1) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_C", self.b2bnum - 1) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_", self.b2bnum - 1) + ".mn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "threadblock_offset\n"
operator_code += " " + ");\n"
operator_code += " " + helper.var_idx("int ref_C", self.b2bnum - 1) + helper.var_idx("_stride = params.ref_C", self.b2bnum - 1) + ".stride()[0];\n"
operator_code += " " + helper.var_idx("iterator_C", self.b2bnum - 1) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", self.b2bnum - 1) + helper.var_idx(".n() * (ref_C", self.b2bnum - 1) + helper.var_idx("_stride == 0 ? 1 : params.problem_size_", self.b2bnum - 1) + ".m()));\n\n"
operator_code += " " + helper.var_idx("typename Epilogue::OutputTileIterator iterator_D", self.b2bnum - 1) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_D", self.b2bnum - 1) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_D", self.b2bnum - 1) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_", self.b2bnum - 1) + ".mn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "threadblock_offset\n"
operator_code += " " + ");\n"
operator_code += " " + helper.var_idx("iterator_D", self.b2bnum - 1) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", self.b2bnum - 1) + helper.var_idx(".n() * params.problem_size_", self.b2bnum - 1) + ".m());\n\n"
operator_code += " " + "Epilogue epilogue(\n"
operator_code += " " + " " + "shared_storage.epilogue,\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "warp_idx,\n"
operator_code += " " + " " + "lane_idx\n"
operator_code += " " + ");\n"
operator_code += " " + "epilogue("
operator_code += helper.var_idx("output_op_", self.b2bnum - 1) + ", "
operator_code += helper.var_idx("iterator_D", self.b2bnum - 1) + ", "
operator_code += "accumulators, "
operator_code += helper.var_idx("iterator_C", self.b2bnum - 1) + ");\n"
operator_code += "}\n"
return ctr_code + operator_code
def gen_include_header(self):
code = '''
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/matrix_coord.h\"
#include \"{cutlass_dir}cutlass/semaphore.h\"
'''.format(cutlass_dir=self.cutlass_deps_root)
return code
def gen_code(self):
template_param = []
template_param.append(("typename", "B2bMma"))
template_param.append(("typename", "Epilogue"))
template_param.append(("typename", "ThreadblockSwizzle"))
template_param.append((bool, "SplitKSerial"))
code_body = ""
code_body += self.gen_using()
code_body += self.gen_operator_and_constr()
struct_code = gen_ir.gen_template_struct(self.gen_class_name, template_param, code_body)
        code = gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("kernel", struct_code)))
        return self.gen_include_header() + code
class gen_kernel:
def __init__(self, template_param, gen_class_name, b2b_num, output_dir, cutlass_deps_root, project_root):
self.template_param = template_param
self.gen_class_name = "B2bGemm"
self.gen_kernel_name = gen_class_name + "Kernel"
self.template_args = []
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
self.gen_default_b2b_gemm = gen_default_Gemm(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
self.gen_Kerenl = gen_Kernel(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
# Include gen_threadBlock
self.gen_threadBlock = gen_tb.gen_threadblock(template_param, gen_class_name, b2b_num, output_dir, cutlass_deps_root, project_root)
self.file_dir = output_dir + "/kernel/"
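    # Writes the generated default_b2b_gemm.h and b2b_gemm.h into <output_dir>/kernel/
    # and then delegates threadblock-level code generation to gen_threadblock.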
def gen_code(self, first_use_1stage):
default_b2b_gemm = self.gen_default_b2b_gemm.gen_code()
print("[INFO]: Gen kernel code [default_b2b_gemm.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "default_b2b_gemm.h", "w+") as f:
f.write(default_b2b_gemm)
kernel = self.gen_Kerenl.gen_code()
print("[INFO]: Gen kernel code [b2b_gemm.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "b2b_gemm.h", "w+") as f:
f.write(kernel)
# Call code to gen threadblock
self.gen_threadBlock.gen_code(first_use_1stage)
| examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_kernel.py/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_kernel.py",
"repo_id": "examples",
"token_count": 11319
} | 5 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Example of a GETT targeting Hopper tensor cores using the CUTLASS 3.x API.
CUTLASS has long provided implementations of Generalized Matrix times Matrix (GEMM) kernels.
However, a plethora of workloads compute on higher ranked tensors. Products of such tensors,
called tensor contractions, can be executed as multiple batched GEMMs, however, they can be
further accelerated with kernels that natively operate on these higher ranked tensors to
perform Generalized Tensor times Tensor contractions (GETT). CuTe's hierarchical layouts
and CUTLASS 3.0's unified micro-kernels make implementation of GETTs trivial. In this example,
we show how CUTLASS 3.0, CuTe, and Hopper's TMA feature together can accelerate GETTs while
making the process of authoring custom GETT kernels easier than ever before.
The modes of a tensor that participate in a GETT can be fundamentally grouped into four
semantic categories. The contraction modes (or K-modes) only appear in the A and B (left and right)
inputs but not in the C output tensor. Row modes (or M-modes) only appear in the left
input tensor (A) and the output tensor (C). Column modes (or N-modes) only appear in the
right (B) input tensor and the output tensor (C). Batch modes (or L-modes) appear in all
input and output tensors. If we fold the many modes of a tensor contraction into these four
categories, it would allow us to represent the input and output tensors as rank-3 "matrices"
that can be computed upon as if we were computing a batched GEMM!
This is exactly what CuTe's hierarchical layout representation allows us to do! Instead of having
simple integers as strides for these four modes, we can have nested strides for each of these
semantic categories that themselves have multiple modes within them -- multi-mode strides!
In CUTLASS 3.0, all one has to do to take advantage of this capability is to substitute the
required multi-mode strides instead of the default ones provided by gemm::detail::TagToStrideX.
In the following example, we illustrate how every Hopper GEMM in CUTLASS 3.0 is a GETT in disguise.
We begin by defining the four modes detailed above as Row, Col (column), Red (reduction), and
Bat (batch) strides, which we then nest for each of the in/out tensors to create our rank-3 stride
    tuples. Note that although we do not define the problem shape type explicitly, it too remains a
rank-4 shape tuple just like any other batched GEMM, but instead with multi-mode shapes for each
of the four corresponding multi-modes within it. After this, the same CollectiveMma and
CollectiveBuilder we describe in examples 50 and 49 are used to create our kernel type. Nothing
else changes from a user's point of view. Note that multi-mode strides do not affect our
specializations in any way -- the lexical spelling of our kernels remains the same. The
    only difference between a CUTLASS 3 batched GEMM and a GETT is the instantiated CuTe Layouts.
CollectiveBuilders rely on detecting the static-1 in the stride tuples to determine the major mode,
which is what the example demonstrates. However, it is possible to have all modes be dynamic as well
if the user assembles a CollectiveMma manually and ensures that the runtime strides are compatible
with the static micro-kernel of the collective (TiledMma, TiledCopy, and smem layouts). On the other
hand, a user can have more than one static stride too (which need not correspond to the major mode).
In particular, this example demonstrates a GETT where the 0th M-mode (M0) in A and the 0th K-mode (K0)
in B are major. All other combinations of major modes are supported, with the exception of mixed
K-major scenarios where both A and B are K-major (e.g. K0 is major in A but K1 is major in B).
    NVIDIA Hopper architecture's TMA feature makes the predication required to implement these complicated
kernels trivial, as it is all handled by TMA itself without requiring any programmer effort.
Example executions, where the stride order defines the major-order (major on the left):
51_hopper_gett --modeC=m,n,l --modeA=m,k,l --modeB=k,n,l --extents=m:4096,n:4096,k:4096
51_hopper_gett --modeC=l,m,n --modeA=m,l,k --modeB=k,n,l --extents=m:128,n:128,k:128,l:64
51_hopper_gett --modeC=m,a,b,p,q,n,l --modeA=m,l,b,k,a --modeB=k,n,p,q,l --extents=m:32,a:32,b:3,n:128,k:128,l:4,p:3,q:3
*/
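// As a concrete illustration of the mode folding described above, consider the third example
// execution: modeC=m,a,b,p,q,n,l, modeA=m,l,b,k,a, modeB=k,n,p,q,l.
//   M-modes (A and C only): m, a, b  -> extents 32, 32, 3
//   N-modes (B and C only): n, p, q  -> extents 128, 3, 3
//   K-modes (A and B only): k        -> extent  128
//   L-modes (A, B, and C):  l        -> extent  4
// Folded into a batched GEMM, this behaves like M = 32*32*3, N = 128*3*3, K = 128, L = 4,
// with the per-category multi-mode strides carried by the nested stride tuples defined below.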
#include "gett_kernel.cuh"
#include "thrust/host_vector.h"
#include "thrust/device_vector.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/util/gett_commandline.hpp"
#include "cutlass/util/reference/device/gett.hpp"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/print_error.hpp"
namespace example {
// Returns true if the left-most value in the tuple is statically known to be 1
template<class Stride>
constexpr bool
is_left_major() {
// Account for stride types with and without batch mode and batch modes with static zero stride
return cute::is_constant<1, decltype(cute::size<0,0>(Stride{}))>::value;
}
// Same as cute::make_int_tuple but inserts a major stride (Int<1>) for the leftmost mode if required
template <int Rank, bool IsMajor, class Indexable>
static constexpr
auto
make_stride_tuple(Indexable const& t, int n, int64_t init_default = 0) {
static_assert(Rank > 1);
if constexpr (IsMajor) {
return cute::transform(cute::make_seq<Rank>{}, [&](auto i) {
if constexpr (i == 0) {
return cute::Int<1>{};
}
else {
return i < n ? t[i] : init_default;
}
});
}
else {
return cute::make_int_tuple<Rank>(t, n, init_default);
}
}
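// For instance (names are placeholders), make_stride_tuple<4, true>(ld, /*n=*/2) yields
// (_1, ld[1], 0, 0): the leftmost mode receives a static unit stride, and trailing modes
// beyond n fall back to init_default. The non-major path simply forwards to
// cute::make_int_tuple.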
} // namespace example
//////////////////////////////////////////////////////////////////////////////
int
main(int argc, char const* argv[]) {
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
using namespace cute;
if (argc != 5) {
std::cout << "Number of command line args must be 4.\n";
cutlass::GettCommandLine::print_usage();
return 0;
}
//
// Define the stride types for A, B, C, and D
//
// Stride for A (left input). If reduction mode is major, same must be major in B
// For this example, M0 is major in A.
using RowModeStridesA = cute::Stride<cute::Int<1>, int64_t, int64_t, int64_t>;
using RedModeStridesA = cute::Stride<int64_t, int64_t, int64_t>;
using BatModeStridesA = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
// Stride for B (right input). If reduction mode is major, same must be major in A
// For this example, K0 is major in B.
using ColModeStridesB = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
using RedModeStridesB = cute::Stride<cute::Int<1>, int64_t, int64_t>;
using BatModeStridesB = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
// Strides for output, which can all be dynamic.
using RowModeStridesC = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
using ColModeStridesC = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
using BatModeStridesC = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
  // Assemble our rank-3 multi-mode strides for the in/out tensors
using StrideA = cute::Stride<RowModeStridesA, RedModeStridesA, BatModeStridesA>;
using StrideB = cute::Stride<ColModeStridesB, RedModeStridesB, BatModeStridesB>;
using StrideC = cute::Stride<RowModeStridesC, ColModeStridesC, BatModeStridesC>;
// Note: C and D share strides here for simplicity.
// In general, they need not have the same layout.
using StrideD = StrideC;
//
// Define element types for tensors and intermediate values
//
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using ElementD = float;
using ElementAccumulator = float;
using ElementEpilogue = float;
// The following constexpr values set the max number of modes in each MNKL mode
constexpr int MaxRank_M = cute::rank(RowModeStridesA{}); // Max row modes
constexpr int MaxRank_N = cute::rank(ColModeStridesB{}); // Max column modes
constexpr int MaxRank_K = cute::rank(RedModeStridesA{}); // Max contraction modes
constexpr int MaxRank_L = cute::rank(BatModeStridesA{}); // Max batch modes
static_assert(cute::rank(RowModeStridesA{}) == cute::rank(RowModeStridesC{}));
  static_assert(cute::rank(ColModeStridesB{}) == cute::rank(ColModeStridesC{}));
static_assert(cute::rank(RedModeStridesA{}) == cute::rank(RedModeStridesB{}));
static_assert(cute::rank(BatModeStridesA{}) == cute::rank(BatModeStridesC{}));
static_assert(cute::rank(BatModeStridesB{}) == cute::rank(BatModeStridesC{}));
// Parse command line to get modes, extents, and strides
cutlass::GettCommandLine cmd;
auto parsed_args = cmd.parse(argc, argv, true);
auto& m = parsed_args.M;
auto& ldAm = parsed_args.ldAm;
auto& ldCm = parsed_args.ldCm;
int rank_m = int(m.size());
auto& n = parsed_args.N;
auto& ldBn = parsed_args.ldBn;
auto& ldCn = parsed_args.ldCn;
int rank_n = int(n.size());
auto& k = parsed_args.K;
auto& ldAk = parsed_args.ldAk;
auto& ldBk = parsed_args.ldBk;
int rank_k = int(k.size());
auto& l = parsed_args.L;
auto& ldAl = parsed_args.ldAl;
auto& ldBl = parsed_args.ldBl;
auto& ldCl = parsed_args.ldCl;
int rank_l = int(l.size());
if ((rank_m > MaxRank_M) || (rank_n > MaxRank_N) || (rank_k > MaxRank_K) || (rank_l > MaxRank_L)) {
std::cerr << "ERROR: Input has more modes than statically configured.";
return 1;
}
// Check that the user input major stride match the static major strides.
if (example::is_left_major<RowModeStridesA>() && (ldAm[0] != 1)) {
std::cerr << "ERROR: A_M0 is expected to be major, but was not in the provided input!\n";
return 1;
}
if (example::is_left_major<RedModeStridesA>() && (ldAk[0] != 1)) {
std::cerr << "ERROR: A_K0 is expected to be major, but was not in the provided input!\n";
return 1;
}
if (example::is_left_major<ColModeStridesB>() && (ldBn[0] != 1)) {
std::cerr << "ERROR: B_N0 is expected to be major, but was not in the provided input!\n";
return 1;
}
if (example::is_left_major<RedModeStridesB>() && (ldBk[0] != 1)) {
std::cerr << "ERROR: B_K0 is expected to be major, but was not in the provided input!\n";
return 1;
}
// Convert to `cute::Tuple`s and set up arguments
auto M = make_int_tuple<MaxRank_M>(m.data(), rank_m, 1);
auto dAm = example::make_stride_tuple<MaxRank_M, example::is_left_major<RowModeStridesA>()>(ldAm.data(), rank_m);
auto dCm = example::make_stride_tuple<MaxRank_M, example::is_left_major<RowModeStridesC>()>(ldCm.data(), rank_m);
auto N = make_int_tuple<MaxRank_N>(n.data(), rank_n, 1);
auto dBn = example::make_stride_tuple<MaxRank_N, example::is_left_major<ColModeStridesB>()>(ldBn.data(), rank_n);
auto dCn = example::make_stride_tuple<MaxRank_N, example::is_left_major<ColModeStridesC>()>(ldCn.data(), rank_n);
auto K = make_int_tuple<MaxRank_K>(k.data(), rank_k, 1);
auto dAk = example::make_stride_tuple<MaxRank_K, example::is_left_major<RedModeStridesA>()>(ldAk.data(), rank_k);
auto dBk = example::make_stride_tuple<MaxRank_K, example::is_left_major<RedModeStridesB>()>(ldBk.data(), rank_k);
auto L = make_int_tuple<MaxRank_L>(l.data(), rank_l, 1);
auto dAl = make_int_tuple<MaxRank_L>(ldAl.data(), rank_l, 0);
auto dBl = make_int_tuple<MaxRank_L>(ldBl.data(), rank_l, 0);
auto dCl = make_int_tuple<MaxRank_L>(ldCl.data(), rank_l, 0);
// Concat tuples to turn it into rank-4 problem shape and rank-3 strides, just like GEMM
auto problem_shape = make_shape(M, N, K, L);
StrideA stride_A = make_stride(dAm, dAk, dAl);
StrideB stride_B = make_stride(dBn, dBk, dBl);
StrideC stride_C = make_stride(dCm, dCn, dCl);
StrideD stride_D = stride_C;
auto alpha = ElementEpilogue(1.0f);
auto beta = ElementEpilogue(1.0f);
//
// Allocate and init tensors
//
auto M_size = std::accumulate(std::begin(m), std::end(m), 1, std::multiplies<>{});
auto N_size = std::accumulate(std::begin(n), std::end(n), 1, std::multiplies<>{});
auto K_size = std::accumulate(std::begin(k), std::end(k), 1, std::multiplies<>{});
auto L_size = std::accumulate(std::begin(l), std::end(l), 1, std::multiplies<>{});
thrust::host_vector<ElementA> h_A(M_size * K_size * L_size);
thrust::host_vector<ElementB> h_B(N_size * K_size * L_size);
thrust::host_vector<ElementC> h_C(M_size * N_size * L_size);
thrust::host_vector<ElementD> h_D(M_size * N_size * L_size);
// Note: the cast to int here is to avoid false-negative ref-checks which can
// occur due to floating point arithmetic not being purely associative.
for (auto& a : h_A) a = ElementA(int(4*(rand() / double(RAND_MAX)) - 1));
for (auto& b : h_B) b = ElementB(int(4*(rand() / double(RAND_MAX)) - 1));
for (auto& c : h_C) c = ElementC(int(4*(rand() / double(RAND_MAX)) - 1));
for (auto& d : h_D) d = ElementD(-1);
thrust::device_vector<ElementA> d_A = h_A;
thrust::device_vector<ElementB> d_B = h_B;
thrust::device_vector<ElementC> d_C = h_C;
thrust::device_vector<ElementD> cutlass_result = h_D;
thrust::device_vector<ElementD> reference_result = h_D;
//
// Compute GETT
//
auto status = example::gett_kernel(
problem_shape,
d_A.data().get(), stride_A,
d_B.data().get(), stride_B,
ElementAccumulator{},
d_C.data().get(), stride_C,
cutlass_result.data().get(), stride_D,
alpha, beta);
if (cutlass::Status::kSuccess != status) {
std::cerr << "ERROR: GETT operator launch failed.\n";
return 1;
}
auto cuda_err = cudaDeviceSynchronize();
if (cudaSuccess != cuda_err) {
std::cerr << "ERROR: GETT operator execution failed. with error :";
std::cerr << cudaGetErrorString(cuda_err) << "\n";
return 1;
}
//
// Verify
//
cutlass::reference::device::gett(
problem_shape,
d_A.data().get(), stride_A,
d_B.data().get(), stride_B,
ElementAccumulator{},
d_C.data().get(), stride_C,
reference_result.data().get(), stride_D,
alpha, beta);
cuda_err = cudaDeviceSynchronize();
if (cudaSuccess != cuda_err) {
std::cerr << "ERROR: GETT reference execution failed. with error :";
std::cerr << cudaGetErrorString(cuda_err) << "\n";
return 1;
}
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::device::BlockCompareEqual(
reference_result.data().get(), cutlass_result.data().get(), cutlass_result.size());
if (passed) {
std::cout << "GETT verification passed.\n";
return 0;
}
else {
std::cerr << "ERROR: GETT verification failed! Printing detailed stats.\n";
h_D = reference_result;
thrust::host_vector<ElementD> h_cutlass_result = cutlass_result;
print_relative_error(h_cutlass_result.size(), h_cutlass_result.data(), h_D.data());
std::cout << "StrideA: "; print(stride_A); std::cout << '\n';
std::cout << "StrideB: "; print(stride_B); std::cout << '\n';
std::cout << "StrideC: "; print(stride_C); std::cout << '\n';
std::cout << "StrideD: "; print(stride_D); std::cout << '\n';
return 1;
}
#else
std::cerr << "Unsupported example. Please ensure CUTLASS_ARCH_MMA_SM90_SUPPORTED is defined.\n";
return 0;
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
}
| examples/51_hopper_gett/51_hopper_gett.cu/0 | {
"file_path": "examples/51_hopper_gett/51_hopper_gett.cu",
"repo_id": "examples",
"token_count": 6010
} | 6 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cute/layout.hpp"
#include "cute/tensor.hpp"
#include "cute/util/print.hpp"
namespace example {
using namespace cute;
// Empty type used to disable gather/scatter for a GEMM argument
struct NoGather
{
template<class... Ts>
NoGather(Ts...) {};
};
/// Function object that applies an index to its argument
template <class Index>
struct IndexedGather
{
CUTE_HOST_DEVICE constexpr
IndexedGather(Index const *indices = {}): indices_(indices) {}
template <typename I>
CUTE_HOST_DEVICE constexpr
Index
operator()(I i) const { return indices_[i]; }
CUTE_HOST_DEVICE friend
void
print(IndexedGather const &s) {
cute::print("Indexed");
}
Index const *indices_;
};
/// Function object that applies a stride to its argument
/// Example: StridedGather<_2> gathers every other row/column
template <class Stride>
struct StridedGather
{
CUTE_HOST_DEVICE constexpr
StridedGather(Stride stride = {}): stride_(stride) {}
template <class I>
CUTE_HOST_DEVICE constexpr
auto
operator()(I i) const { return i * stride_; }
CUTE_HOST_DEVICE friend
void
print(StridedGather const &s) {
cute::print("Strided{");
print(s.stride_);
cute::print("}");
}
Stride stride_;
};
/// Custom stride object that applies a function followed by a stride
template <class Func, class Stride>
struct CustomStride
{
CUTE_HOST_DEVICE constexpr
CustomStride(Func const &func, Stride const &stride): func_(func), stride_(stride) {}
template <class I>
CUTE_HOST_DEVICE constexpr friend
auto
operator*(I i, CustomStride const &s) { return s.func_(i) * s.stride_; }
template <class I>
CUTE_HOST_DEVICE constexpr friend
auto
operator*(CustomStride const &s, I i) { return s.func_(i) * s.stride_; }
CUTE_HOST_DEVICE friend
void
print(CustomStride const & s) {
cute::print("Custom{");
print(s.func_);
cute::print(",");
print(s.stride_);
cute::print("}");
}
template<class Div>
CUTE_HOST_DEVICE constexpr friend
auto
safe_div(CustomStride const &s, Div const &div)
{
return CustomStride<Func, decltype(safe_div(s.stride_, div))>(s.func_, safe_div(s.stride_, div));
}
// Circumvent the requirement on make_layout that shape and stride are integral
template <class Shape>
CUTE_HOST_DEVICE constexpr friend
auto
make_layout(Shape const &shape, CustomStride const &stride)
{
return Layout<Shape, CustomStride>(shape, stride);
}
Func func_;
Stride stride_;
};
template<class Stride, class Func>
CUTLASS_HOST_DEVICE
auto
make_custom_stride_layout(Stride const &stride, Func&& func)
{
// Use a dummy shape and replace the first non-unit stride with a custom gather stride
auto idx = find_if(stride, [](auto x){ return not is_constant<1, decltype(x)>{}; });
constexpr int I = decltype(idx)::value;
return make_layout(repeat_like(stride, _1{}),
replace<I>(stride, CustomStride{static_cast<Func&&>(func), get<I>(stride)}));
}
/// Helper function to optionally create a gather tensor
template<class Iterator, class Shape, class Stride, class Func>
CUTLASS_HOST_DEVICE
auto
make_gather_tensor(Iterator iter, Shape const &shape, Stride const &stride, Func &&func)
{
if constexpr (not cutlass::platform::is_same<remove_cvref_t<Func>, NoGather>::value) {
Layout matrix_layout = make_identity_layout(shape);
auto offset = as_arithmetic_tuple(repeat_like(shape, _0{}));
Layout gather_layout = make_custom_stride_layout(stride, static_cast<Func&&>(func));
return make_tensor(iter, ComposedLayout{gather_layout, offset, matrix_layout});
} else {
return make_tensor(iter, shape, stride);
}
}
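// Illustrative usage sketch (not part of the original header; `A_ptr`, `idx_ptr`,
// `M`, and `K` are hypothetical placeholders): build a row-major MxK tensor whose
// rows are gathered through an index buffer.
//
//   auto gA = example::make_gather_tensor(
//       A_ptr,                                   // data pointer
//       cute::make_shape(M, K),                  // logical shape
//       cute::make_stride(K, cute::_1{}),        // row-major strides
//       example::IndexedGather<int>{idx_ptr});   // maps logical row -> physical row
//
// Passing NoGather{} as the functor falls back to a plain make_tensor(iter, shape, stride).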
} // namespace example
namespace cute
{
template<int N, int I, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
upcast(Shape const& shape, Stride const& stride)
{
if constexpr (is_tuple<Shape>::value) {
return transform_layout(shape, stride, [](auto const& s, auto const& d) { return upcast<N,I>(s,d); });
} else if constexpr (is_scaled_basis<Stride>::value) {
if constexpr (Stride::mode() == I) {
return make_layout(shape_div(shape, Int<N>{}), shape_div(stride, Int<N>{}));
} else {
return make_layout(shape, stride);
}
} else {
return upcast<N>(shape, stride);
}
CUTE_GCC_UNREACHABLE;
}
template <int N, class OuterShape, class OuterStride, class Offset, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
upcast(ComposedLayout<Layout<OuterShape,OuterStride>,Offset,Layout<Shape,Stride>> const& layout)
{
// Find index of the stride-1 mode - that is the only one that requires updating inner shape and offset
auto idx = find_if(layout.layout_a().stride(), [](auto x){ return is_constant<1, decltype(x)>{}; });
constexpr int I = decltype(idx)::value;
// Upcast the outer layout (works as expected)
auto outer = upcast<N>(layout.layout_a());
// Upcast the accumulated offset along stride-1 mode
auto offset = as_arithmetic_tuple(replace<I>(layout.offset(), upcast<N>(get<I>(layout.offset()))));
// Upcast the inner layout's shape along stride-1 mode
auto inner = upcast<N,I>(layout.layout_b().shape(), layout.layout_b().stride());
return composition(outer, offset, inner);
}
} // namespace cute
| examples/common/gather_tensor.hpp/0 | {
"file_path": "examples/common/gather_tensor.hpp",
"repo_id": "examples",
"token_count": 2342
} | 7 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/arch/copy.hpp>
// Config
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
# define CUTE_ARCH_CP_ASYNC_SM80_ENABLED
#endif
namespace cute
{
/// Copy via cp.async with caching at all levels
template <class TS, class TD = TS>
struct SM80_CP_ASYNC_CACHEALWAYS
{
using SRegisters = TS[1];
using DRegisters = TD[1];
static_assert(sizeof(TS) == sizeof(TD), "cp.async requires sizeof(src_value_type) == sizeof(dst_value_type)");
static_assert(sizeof(TS) == 4 || sizeof(TS) == 8 || sizeof(TS) == 16, "cp.async sizeof(TS) is not supported");
CUTE_HOST_DEVICE static void
copy(TS const& gmem_src,
TD & smem_dst)
{
#if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED)
TS const* gmem_ptr = &gmem_src;
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile("cp.async.ca.shared.global.L2::128B [%0], [%1], %2;\n"
:: "r"(smem_int_ptr),
"l"(gmem_ptr),
"n"(sizeof(TS)));
#else
CUTE_INVALID_CONTROL_PATH("Support for cp.async instructions has not been enabled");
#endif
}
};
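// Sketch of typical use through cute's copy machinery (an assumption-laden example,
// not a prescription; the half_t element type and 128-bit vector width are arbitrary):
//
//   using AsyncCopyOp = SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>;
//   auto copy_atom    = cute::Copy_Atom<AsyncCopyOp, cute::half_t>{};
//   // ...combined with make_tiled_copy(copy_atom, thr_layout, val_layout) and
//   // cute::copy(...) to issue the asynchronous gmem -> smem transfers.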
/// Copy via cp.async with caching at global level
template <class TS, class TD = TS>
struct SM80_CP_ASYNC_CACHEGLOBAL
{
using SRegisters = TS[1];
using DRegisters = TD[1];
static_assert(sizeof(TS) == sizeof(TD), "cp.async requires sizeof(src_value_type) == sizeof(dst_value_type)");
static_assert(sizeof(TS) == 4 || sizeof(TS) == 8 || sizeof(TS) == 16, "cp.async sizeof(TS) is not supported");
CUTE_HOST_DEVICE static void
copy(TS const& gmem_src,
TD & smem_dst)
{
#if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED)
TS const* gmem_ptr = &gmem_src;
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile("cp.async.cg.shared.global.L2::128B [%0], [%1], %2;\n"
:: "r"(smem_int_ptr),
"l"(gmem_ptr),
"n"(sizeof(TS)));
#else
CUTE_INVALID_CONTROL_PATH("Support for cp.async instructions has not been enabled");
#endif
}
};
/// Copy via cp.async with caching at all levels
template <class TS, class TD = TS>
struct SM80_CP_ASYNC_CACHEALWAYS_ZFILL
{
using SRegisters = TS[1];
using DRegisters = TD[1];
static_assert(sizeof(TS) == sizeof(TD), "cp.async requires sizeof(src_value_type) == sizeof(dst_value_type)");
static_assert(sizeof(TS) == 4 || sizeof(TS) == 8 || sizeof(TS) == 16, "cp.async sizeof(TS) is not supported");
CUTE_HOST_DEVICE static void
copy(TS const& gmem_src,
TD & smem_dst,
bool pred)
{
#if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED)
TS const* gmem_ptr = &gmem_src;
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
int src_size = pred ? sizeof(TS) : 0;
asm volatile("cp.async.ca.shared.global.L2::128B [%0], [%1], %2, %3;\n"
:: "r"(smem_int_ptr),
"l"(gmem_ptr),
"n"(sizeof(TS)),
"r"(src_size));
#else
CUTE_INVALID_CONTROL_PATH("Support for cp.async instructions has not been enabled");
#endif
}
};
/// Copy via cp.async with caching at global level
template <class TS, class TD = TS>
struct SM80_CP_ASYNC_CACHEGLOBAL_ZFILL
{
using SRegisters = TS[1];
using DRegisters = TD[1];
static_assert(sizeof(TS) == sizeof(TD), "cp.async requires sizeof(src_value_type) == sizeof(dst_value_type)");
static_assert(sizeof(TS) == 4 || sizeof(TS) == 8 || sizeof(TS) == 16, "cp.async sizeof(TS) is not supported");
CUTE_HOST_DEVICE static void
copy(TS const& gmem_src,
TD & smem_dst,
bool pred)
{
#if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED)
TS const* gmem_ptr = &gmem_src;
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
int src_size = pred ? sizeof(TS) : 0;
asm volatile("cp.async.cg.shared.global.L2::128B [%0], [%1], %2, %3;\n"
:: "r"(smem_int_ptr),
"l"(gmem_ptr),
"n"(sizeof(TS)),
"r"(src_size));
#else
CUTE_INVALID_CONTROL_PATH("Support for cp.async instructions has not been enabled");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Establishes an ordering w.r.t. previously issued cp.async instructions. Does not block.
CUTE_HOST_DEVICE
void
cp_async_fence()
{
#if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED)
asm volatile("cp.async.commit_group;\n" ::);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Blocks until all but N previous cp.async.commit_group operations have committed.
template <int N>
CUTE_HOST_DEVICE
void
cp_async_wait()
{
#if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED)
if constexpr (N == 0) {
asm volatile("cp.async.wait_all;\n" ::);
} else {
asm volatile("cp.async.wait_group %0;\n" :: "n"(N));
}
#endif
}
template <int N>
CUTE_HOST_DEVICE
void
cp_async_wait(Int<N>)
{
return cp_async_wait<N>();
}
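// Typical multistage-pipeline pattern built from these primitives (a sketch;
// `NumStages`, `tiled_copy`, and the tile tensors are hypothetical):
//
//   cute::copy(tiled_copy, gmem_tile, smem_tile);   // issue cp.async instructions
//   cute::cp_async_fence();                         // commit them as one group
//   // ... later, before reading that smem stage:
//   cute::cp_async_wait<NumStages - 2>();           // allow at most N groups in flight
//   __syncthreads();                                // make smem writes visible to the CTA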
/////////////////////////////////////////////////////////////////////////////////////////////////
} // end namespace cute
| include/cute/arch/copy_sm80.hpp/0 | {
"file_path": "include/cute/arch/copy_sm80.hpp",
"repo_id": "include",
"token_count": 2592
} | 8 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/numeric/integral_constant.hpp>
#include <cute/util/type_traits.hpp>
namespace cute
{
template <class T, size_t N>
struct array
{
using element_type = T;
using value_type = remove_cv_t<T>;
using size_type = size_t;
using difference_type = ptrdiff_t;
using reference = element_type&;
using const_reference = const element_type&;
using pointer = element_type*;
using const_pointer = const element_type*;
using iterator = pointer;
using const_iterator = const_pointer;
CUTE_HOST_DEVICE constexpr
reference operator[](size_type pos)
{
return begin()[pos];
}
CUTE_HOST_DEVICE constexpr
const_reference operator[](size_type pos) const
{
return begin()[pos];
}
CUTE_HOST_DEVICE constexpr
reference front()
{
return *begin();
}
CUTE_HOST_DEVICE constexpr
const_reference front() const
{
return *begin();
}
CUTE_HOST_DEVICE constexpr
reference back()
{
// return *rbegin();
return operator[](N-1);
}
CUTE_HOST_DEVICE constexpr
const_reference back() const
{
// return *rbegin();
return operator[](N-1);
}
CUTE_HOST_DEVICE constexpr
T* data()
{
return __elems_;
}
CUTE_HOST_DEVICE constexpr
T const* data() const
{
return __elems_;
}
CUTE_HOST_DEVICE constexpr
iterator begin()
{
return data();
}
CUTE_HOST_DEVICE constexpr
const_iterator begin() const
{
return data();
}
CUTE_HOST_DEVICE constexpr
const_iterator cbegin()
{
return begin();
}
CUTE_HOST_DEVICE constexpr
const_iterator cbegin() const
{
return begin();
}
CUTE_HOST_DEVICE constexpr
iterator end()
{
return data() + size();
}
CUTE_HOST_DEVICE constexpr
const_iterator end() const
{
return data() + size();
}
CUTE_HOST_DEVICE constexpr
const_iterator cend()
{
return end();
}
CUTE_HOST_DEVICE constexpr
const_iterator cend() const
{
return end();
}
CUTE_HOST_DEVICE constexpr
bool empty() const
{
return size() == 0;
}
CUTE_HOST_DEVICE constexpr
size_type size() const
{
return N;
}
CUTE_HOST_DEVICE constexpr
size_type max_size() const
{
return size();
}
CUTE_HOST_DEVICE constexpr
void fill(const T& value)
{
for (auto& e : *this) {
e = value;
}
}
CUTE_HOST_DEVICE constexpr
void clear()
{
fill(T(0));
}
CUTE_HOST_DEVICE constexpr
void swap(array& other)
{
using CUTE_STL_NAMESPACE::swap;
for (size_type i = 0; i < size(); ++i) {
swap((*this)[i], other[i]);
}
}
element_type __elems_[N];
};
template <class T>
struct array<T, 0>
{
using element_type = T;
using value_type = remove_cv_t<T>;
using size_type = size_t;
using difference_type = ptrdiff_t;
using reference = element_type&;
using const_reference = const element_type&;
using pointer = element_type*;
using const_pointer = const element_type*;
using const_iterator = const_pointer;
using iterator = pointer;
CUTE_HOST_DEVICE constexpr
reference operator[](size_type pos)
{
return begin()[pos];
}
CUTE_HOST_DEVICE constexpr
const_reference operator[](size_type pos) const
{
return begin()[pos];
}
CUTE_HOST_DEVICE constexpr
reference front()
{
return *begin();
}
CUTE_HOST_DEVICE constexpr
const_reference front() const
{
return *begin();
}
CUTE_HOST_DEVICE constexpr
reference back()
{
return *begin();
}
CUTE_HOST_DEVICE constexpr
const_reference back() const
{
return *begin();
}
CUTE_HOST_DEVICE constexpr
T* data()
{
return nullptr;
}
CUTE_HOST_DEVICE constexpr
T const* data() const
{
return nullptr;
}
CUTE_HOST_DEVICE constexpr
iterator begin()
{
return nullptr;
}
CUTE_HOST_DEVICE constexpr
const_iterator begin() const
{
return nullptr;
}
CUTE_HOST_DEVICE constexpr
const_iterator cbegin()
{
return nullptr;
}
CUTE_HOST_DEVICE constexpr
const_iterator cbegin() const
{
return nullptr;
}
CUTE_HOST_DEVICE constexpr
iterator end()
{
return nullptr;
}
CUTE_HOST_DEVICE constexpr
const_iterator end() const
{
return nullptr;
}
CUTE_HOST_DEVICE constexpr
const_iterator cend()
{
return nullptr;
}
CUTE_HOST_DEVICE constexpr
const_iterator cend() const
{
return nullptr;
}
CUTE_HOST_DEVICE constexpr
bool empty() const
{
return true;
}
CUTE_HOST_DEVICE constexpr
size_type size() const
{
return 0;
}
CUTE_HOST_DEVICE constexpr
size_type max_size() const
{
return 0;
}
CUTE_HOST_DEVICE constexpr
void fill(const T& value)
{}
CUTE_HOST_DEVICE constexpr
void clear()
{}
CUTE_HOST_DEVICE constexpr
void swap(array& other)
{}
};
template <class T, size_t N>
CUTE_HOST_DEVICE constexpr
bool operator==(array<T,N> const& lhs, array<T,N> const& rhs)
{
for (size_t i = 0; i < N; ++i) {
if (lhs[i] != rhs[i]) {
return false;
}
}
return true;
}
template <class T, size_t N>
CUTE_HOST_DEVICE constexpr
void clear(array<T,N>& a)
{
a.fill(T(0));
}
template <class T, size_t N>
CUTE_HOST_DEVICE constexpr
void fill(array<T,N>& a, T const& value)
{
a.fill(value);
}
template <class T, size_t N>
CUTE_HOST_DEVICE constexpr
void swap(array<T,N>& a, array<T,N>& b)
{
a.swap(b);
}
/// @return A cute::array of the elements of @c t in reverse order.
template <class T, size_t N>
CUTE_HOST_DEVICE constexpr
cute::array<T,N> reverse(cute::array<T,N> const& t)
{
if constexpr (N == 0u) {
return t;
} else {
cute::array<T,N> t_r{};
for (size_t k = 0; k < N; ++k) {
t_r[k] = t[N - k - 1];
}
return t_r;
}
}
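// Brief usage sketch (illustrative only):
//
//   cute::array<int, 3> a{{1, 2, 3}};
//   auto b = cute::reverse(a);   // {3, 2, 1}
//   a.fill(0);                   // equivalent to cute::clear(a)
//   // get<I>, tuple_size, and tuple_element support is provided below.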
} // end cute
//
// Specialize tuple-related functionality for cute::array
//
#if defined(__CUDACC_RTC__)
#include <cuda/std/tuple>
#else
#include <tuple>
#endif
namespace cute
{
template <size_t I, class T, size_t N>
CUTE_HOST_DEVICE constexpr
T& get(array<T,N>& a)
{
static_assert(I < N, "Index out of range");
return a[I];
}
template <size_t I, class T, size_t N>
CUTE_HOST_DEVICE constexpr
T const& get(array<T,N> const& a)
{
static_assert(I < N, "Index out of range");
return a[I];
}
template <size_t I, class T, size_t N>
CUTE_HOST_DEVICE constexpr
T&& get(array<T,N>&& a)
{
static_assert(I < N, "Index out of range");
return cute::move(a[I]);
}
} // end namespace cute
namespace CUTE_STL_NAMESPACE
{
template <class T, size_t N>
struct tuple_size<cute::array<T,N>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, N>
{};
template <size_t I, class T, size_t N>
struct tuple_element<I, cute::array<T,N>>
{
using type = T;
};
template <class T, size_t N>
struct tuple_size<cute::array<T,N> const>
: CUTE_STL_NAMESPACE::integral_constant<size_t, N>
{};
template <size_t I, class T, size_t N>
struct tuple_element<I, cute::array<T,N> const>
{
using type = T;
};
} // end namespace CUTE_STL_NAMESPACE
#ifdef CUTE_STL_NAMESPACE_IS_CUDA_STD
namespace std
{
#if defined(__CUDACC_RTC__)
template <class... _Tp>
struct tuple_size;
template <size_t _Ip, class... _Tp>
struct tuple_element;
#endif
template <class T, size_t N>
struct tuple_size<cute::array<T,N>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, N>
{};
template <size_t I, class T, size_t N>
struct tuple_element<I, cute::array<T,N>>
{
using type = T;
};
template <class T, size_t N>
struct tuple_size<cute::array<T,N> const>
: CUTE_STL_NAMESPACE::integral_constant<size_t, N>
{};
template <size_t I, class T, size_t N>
struct tuple_element<I, cute::array<T,N> const>
{
using type = T;
};
} // end namespace std
#endif // CUTE_STL_NAMESPACE_IS_CUDA_STD
| include/cute/container/array.hpp/0 | {
"file_path": "include/cute/container/array.hpp",
"repo_id": "include",
"token_count": 3658
} | 9 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp>
#include <cute/numeric/math.hpp>
#include <cute/numeric/integral_constant.hpp>
namespace cute
{
/** Compile-time rational arithmetic type.
* Like cute::C for std::integral_constant, cute::R for std::ratio has a short name
* for error messages and compile times.
* The static data members @a num and @a den represent the reduced numerator and denominator
* of the rational value. Thus, two cute::R types with different @a n or @a d are distinct types
* even if they represent the same rational value.
* A cute::R exposes the reduced canonical type via its ::type member.
* That is, cute::R<3,6>::type is cute::R<1,2> and cute::R<6,3>::type is cute::C<2>.
* A cute::R<n,d>::value can be used much like any other trait::value. It can be involved in
* arithmetic expressions (according to the operator-overloads for cute::C and cute::R,
* though these may be incomplete) but with a potential rational value rather than an integral value.
*/
template <auto n, auto d>
class R {
static_assert(d != 0);
static constexpr auto an = abs(n);
static constexpr auto ad = abs(d);
static constexpr auto g = gcd(an, ad);
public:
static constexpr auto num = signum(n) * signum(d) * an / g;
static constexpr auto den = ad / g;
// RI: den >= 1 && gcd(abs(num),den) == 1
using type = typename conditional<num == 0 || den == 1, C<num>, R<num,den>>::type;
};
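// Illustrative reductions and arithmetic (values follow from the definitions above
// and the operator overloads below):
//
//   cute::R<3,6>::type    // R<1,2>  (num == 1, den == 2)
//   cute::R<6,3>::type    // C<2>    (den reduces to 1, so the canonical type is integral)
//   R<1,2>{} * C<4>{}     // C<2>
//   7 * R<1,2>{}          // 3       (dynamic operands produce an integer: 7 * 1 / 2)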
template <class T>
struct is_ratio : false_type {};
template <auto n, auto d>
struct is_ratio<R<n,d>> : true_type {};
template <auto a, auto b>
CUTE_HOST_DEVICE constexpr
typename R<a,b>::type
ratio(C<a>, C<b>) {
return {};
}
template <auto a, auto b, auto c>
CUTE_HOST_DEVICE constexpr
typename R<a*c,b>::type
ratio(C<a>, R<b,c>) {
return {};
}
template <auto a, auto b, auto c>
CUTE_HOST_DEVICE constexpr
typename R<b,a*c>::type
ratio(R<b,c>, C<a>) {
return {};
}
template <auto a, auto b, auto c, auto d>
CUTE_HOST_DEVICE constexpr
typename R<a*d,b*c>::type
ratio(R<a,b>, R<c,d>) {
return {};
}
//
// Non-reduced ratio implementations
//
template <auto a, auto b>
CUTE_HOST_DEVICE constexpr
R<a,b>
nratio(C<a>, C<b>) {
return {};
}
template <auto a, auto b, auto c>
CUTE_HOST_DEVICE constexpr
R<a*c,b>
nratio(C<a>, R<b,c>) {
return {};
}
template <auto a, auto b, auto c>
CUTE_HOST_DEVICE constexpr
R<b,a*c>
nratio(R<b,c>, C<a>) {
return {};
}
template <auto a, auto b, auto c, auto d>
CUTE_HOST_DEVICE constexpr
R<a*d,b*c>
nratio(R<a,b>, R<c,d>) {
return {};
}
//
// Operators
//
template <auto a, auto b, auto x, auto y>
CUTE_HOST_DEVICE constexpr
typename R<a*x,b*y>::type
operator*(R<a,b>, R<x,y>) {
return {};
}
template <auto a, auto b, auto c>
CUTE_HOST_DEVICE constexpr
typename R<a*c,b>::type
operator*(R<a,b>, C<c>) {
return {};
}
template <auto c, auto a, auto b>
CUTE_HOST_DEVICE constexpr
typename R<a*c,b>::type
operator*(C<c>, R<a,b>) {
return {};
}
template <auto c, auto a, auto b>
CUTE_HOST_DEVICE constexpr
typename R<c*b,a>::type
operator/(C<c>, R<a,b>) {
return {};
}
// Product with dynamic type needs to produce an integer...
template <class C, auto a, auto b,
__CUTE_REQUIRES(cute::is_std_integral<C>::value)>
CUTE_HOST_DEVICE constexpr
auto
operator*(C const& c, R<a,b>) {
return c * R<a,b>::num / R<a,b>::den;
}
// Product with dynamic type needs to produce an integer...
template <auto a, auto b, class C,
__CUTE_REQUIRES(cute::is_std_integral<C>::value)>
CUTE_HOST_DEVICE constexpr
auto
operator*(R<a,b>, C const& c) {
return c * R<a,b>::num / R<a,b>::den;
}
template <auto a, auto b, auto x, auto y>
CUTE_HOST_DEVICE constexpr
typename R<a*y+b*x, b*y>::type
operator+(R<a,b>, R<x,y>) {
return {};
}
template <auto a, auto b, auto c>
CUTE_HOST_DEVICE constexpr
typename R<a+c*b,b>::type
operator+(R<a,b>, C<c>) {
return {};
}
template <auto c, auto a, auto b>
CUTE_HOST_DEVICE constexpr
typename R<a+c*b,b>::type
operator+(C<c>, R<a,b>) {
return {};
}
template <auto a, auto b, auto x, auto y>
CUTE_HOST_DEVICE constexpr
bool_constant<R<a,b>::num == R<x,y>::num && R<a,b>::den == R<x,y>::den>
operator==(R<a,b>, R<x,y>) {
return {};
}
template <auto a, auto b, auto c>
CUTE_HOST_DEVICE constexpr
bool_constant<R<a,b>::num == c && R<a,b>::den == 1>
operator==(R<a,b>, C<c>) {
return {};
}
template <auto c, auto a, auto b>
CUTE_HOST_DEVICE constexpr
bool_constant<R<a,b>::num == c && R<a,b>::den == 1>
operator==(C<c>, R<a,b>) {
return {};
}
template <auto a, auto b>
CUTE_HOST_DEVICE constexpr
typename R<abs(a),abs(b)>::type
abs(R<a,b>) {
return {};
}
template <auto a, auto b>
CUTE_HOST_DEVICE constexpr
int32_t
log_2(R<a,b>) {
static_assert(R<a,b>::num > 0);
static_assert(R<a,b>::den > 0);
return log_2(static_cast<uint32_t>(R<a,b>::num)) - log_2(static_cast<uint32_t>(R<a,b>::den));
}
// @return A non-reduced ratio cute::R of the Trait0::value / Trait1::value
template <class Trait0, class Trait1>
CUTE_HOST_DEVICE constexpr
auto
trait_ratio(Trait0, Trait1) {
return nratio(static_value<Trait0>(), static_value<Trait1>());
}
//
// Display utilities
//
template <auto a, auto b>
CUTE_HOST_DEVICE void print(R<a,b>) {
print(C<a>{}); print("/"); print(C<b>{});
}
#if !defined(__CUDACC_RTC__)
template <auto a, auto b>
CUTE_HOST std::ostream& operator<<(std::ostream& os, R<a,b>) {
return os << "_" << C<a>{} << "/" << C<b>{};
}
#endif
} // end namespace cute
| include/cute/numeric/integral_ratio.hpp/0 | {
"file_path": "include/cute/numeric/integral_ratio.hpp",
"repo_id": "include",
"token_count": 2827
} | 10 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp>
//
// CUDA compatible print and printf
//
namespace cute
{
CUTE_HOST_DEVICE
int
num_digits(int x)
{
return (x < 10 ? 1 :
(x < 100 ? 2 :
(x < 1000 ? 3 :
(x < 10000 ? 4 :
(x < 100000 ? 5 :
(x < 1000000 ? 6 :
(x < 10000000 ? 7 :
(x < 100000000 ? 8 :
(x < 1000000000 ? 9 :
10)))))))));
}
//
// print dispatcher
//
CUTE_HOST_DEVICE
void
print(char c) {
printf("%c", c);
}
CUTE_HOST_DEVICE
void
print(signed char a) {
printf("%d", static_cast<int>(a));
}
CUTE_HOST_DEVICE
void
print(unsigned char a) {
printf("%u", static_cast<unsigned int>(a));
}
CUTE_HOST_DEVICE
void
print(short a) {
printf("%hd", a);
}
CUTE_HOST_DEVICE
void
print(unsigned short a) {
printf("%hu", a);
}
CUTE_HOST_DEVICE
void
print(int a) {
printf("%d", a);
}
CUTE_HOST_DEVICE
void
print(unsigned int a) {
printf("%u", a);
}
CUTE_HOST_DEVICE
void
print(long a) {
printf("%ld", a);
}
CUTE_HOST_DEVICE
void
print(unsigned long a) {
printf("%lu", a);
}
CUTE_HOST_DEVICE
void
print(long long a) {
printf("%lld", a);
}
CUTE_HOST_DEVICE
void
print(unsigned long long a) {
printf("%llu", a);
}
CUTE_HOST_DEVICE
void
print(float a) {
printf("%f", a);
}
CUTE_HOST_DEVICE
void
print(double a) {
printf("%f", a);
}
template <class... T>
CUTE_HOST_DEVICE
void
print(char const* format, T const&... t) {
printf(format, t...);
}
CUTE_HOST_DEVICE
void
print(char const* format) {
printf("%s", format);
}
//
// pretty printing
//
template <class T>
CUTE_HOST_DEVICE void
pretty_print(T const& v) {
printf(" "); print(v);
}
CUTE_HOST_DEVICE void
pretty_print(bool const& v) {
printf("%*d", 3, int(v));
}
CUTE_HOST_DEVICE void
pretty_print(int32_t const& v) {
printf("%*d", 5, v);
}
CUTE_HOST_DEVICE void
pretty_print(uint32_t const& v) {
printf("%*d", 5, v);
}
CUTE_HOST_DEVICE void
pretty_print(int64_t const& v) {
printf("%*lld", 5, static_cast<long long>(v));
}
CUTE_HOST_DEVICE void
pretty_print(uint64_t const& v) {
printf("%*llu", 5, static_cast<unsigned long long>(v));
}
CUTE_HOST_DEVICE void
pretty_print(half_t const& v) {
printf("%*.2f", 8, float(v));
}
CUTE_HOST_DEVICE void
pretty_print(float const& v) {
printf("%*.2e", 10, v);
}
CUTE_HOST_DEVICE void
pretty_print(double const& v) {
printf("%*.3e", 11, v);
}
} // end namespace cute
| include/cute/util/print.hpp/0 | {
"file_path": "include/cute/util/print.hpp",
"repo_id": "include",
"token_count": 1621
} | 11 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Matrix multiply-accumulate specialized for SM89
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////
#if (__CUDACC_VER_MAJOR__ > 12) || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 4)
# define CUTLASS_ARCH_MMA_SM89_SUPPORTED 1
#endif
#if defined(CUTLASS_ARCH_MMA_SM89_SUPPORTED) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ == 890)
# define CUTLASS_ARCH_MMA_SM89_ENABLED
#endif
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
namespace detail {
// Whether the Mma uses an SM89 staged accumulation policy
template <class Operator>
static constexpr bool is_sm89_staged_policy_v =
(
// ElementA must be FP8
platform::is_same<typename Operator::ElementA, cutlass::float_e4m3_t>::value ||
platform::is_same<typename Operator::ElementA, cutlass::float_e5m2_t>::value
) &&
(
// ElementB must be FP8
platform::is_same<typename Operator::ElementB, cutlass::float_e4m3_t>::value ||
platform::is_same<typename Operator::ElementB, cutlass::float_e5m2_t>::value
) &&
(
// The instruction shape must be 16x8x32
Operator::ArchMmaOperator::Shape::kM == 16 &&
Operator::ArchMmaOperator::Shape::kN == 8 &&
Operator::ArchMmaOperator::Shape::kK == 32
) &&
(
// The operator must be OpMultiplyAdd (default)
platform::is_same<typename Operator::MathOperator, OpMultiplyAdd>::value
);
} // namespace detail
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16832 - Float {E4M3, E5M2}, FP32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation - F32 = fe4m3 * fe4m3 + F32
template <typename Operator_>
struct Mma<
gemm::GemmShape<16, 8, 32>,
32,
cutlass::float_e4m3_t,
layout::RowMajor,
cutlass::float_e4m3_t,
layout::ColumnMajor,
float,
layout::RowMajor,
Operator_> {
static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value ||
platform::is_same<Operator_, OpMultiplyAddFastAccum>::value,
"Invalid operator for SM89 FP8 instruction");
using Shape = gemm::GemmShape<16, 8, 32>;
using ElementA = cutlass::float_e4m3_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<ElementA, 16>;
using ElementB = cutlass::float_e4m3_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<ElementB, 8>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = Operator_;
using ArchTag = arch::Sm89;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM89_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm(
"mma.sync.aligned.m16n8k32.row.col.f32.e4m3.e4m3.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
:
"r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]),
"r"(B[0]), "r"(B[1]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3])
);
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
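// Sketch of direct use (normally this atom is selected by higher-level tile iterators;
// the fragments are assumed to already hold data in the required register layout):
//
//   using Mma = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<16, 8, 32>, 32,
//       cutlass::float_e4m3_t, cutlass::layout::RowMajor,
//       cutlass::float_e4m3_t, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   Mma::FragmentA a;  Mma::FragmentB b;  Mma::FragmentC c, d;
//   Mma{}(d, a, b, c);   // one warp-wide 16x8x32 FP8 MMA: d = a * b + c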
/// Matrix multiply-add operation - F32 = fe4m3 * fe5m2 + F32
template <typename Operator_>
struct Mma<
gemm::GemmShape<16, 8, 32>,
32,
cutlass::float_e4m3_t,
layout::RowMajor,
cutlass::float_e5m2_t,
layout::ColumnMajor,
float,
layout::RowMajor,
Operator_> {
static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value ||
platform::is_same<Operator_, OpMultiplyAddFastAccum>::value,
"Invalid operator for SM89 FP8 instruction");
using Shape = gemm::GemmShape<16, 8, 32>;
using ElementA = cutlass::float_e4m3_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<ElementA, 16>;
using ElementB = cutlass::float_e5m2_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<ElementB, 8>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = Operator_;
using ArchTag = arch::Sm89;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM89_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm(
"mma.sync.aligned.m16n8k32.row.col.f32.e4m3.e5m2.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
:
"r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]),
"r"(B[0]), "r"(B[1]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3])
);
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Matrix multiply-add operation - F32 = fe5m2 * fe4m3 + F32
template <typename Operator_>
struct Mma<
gemm::GemmShape<16, 8, 32>,
32,
cutlass::float_e5m2_t,
layout::RowMajor,
cutlass::float_e4m3_t,
layout::ColumnMajor,
float,
layout::RowMajor,
Operator_> {
static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value ||
platform::is_same<Operator_, OpMultiplyAddFastAccum>::value,
"Invalid operator for SM89 FP8 instruction");
using Shape = gemm::GemmShape<16, 8, 32>;
using ElementA = cutlass::float_e5m2_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<ElementA, 16>;
using ElementB = cutlass::float_e4m3_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<ElementB, 8>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = Operator_;
using ArchTag = arch::Sm89;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM89_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm(
"mma.sync.aligned.m16n8k32.row.col.f32.e5m2.e4m3.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
:
"r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]),
"r"(B[0]), "r"(B[1]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3])
);
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Matrix multiply-add operation - F32 = fe5m2 * fe5m2 + F32
template <typename Operator_>
struct Mma<
gemm::GemmShape<16, 8, 32>,
32,
cutlass::float_e5m2_t,
layout::RowMajor,
cutlass::float_e5m2_t,
layout::ColumnMajor,
float,
layout::RowMajor,
Operator_> {
static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value ||
platform::is_same<Operator_, OpMultiplyAddFastAccum>::value,
"Invalid operator for SM89 FP8 instruction");
using Shape = gemm::GemmShape<16, 8, 32>;
using ElementA = cutlass::float_e5m2_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<ElementA, 16>;
using ElementB = cutlass::float_e5m2_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<ElementB, 8>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = Operator_;
using ArchTag = arch::Sm89;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM89_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm(
"mma.sync.aligned.m16n8k32.row.col.f32.e5m2.e5m2.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
:
"r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]),
"r"(B[0]), "r"(B[1]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3])
);
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
} // namespace arch
} // namespace cutlass
| include/cutlass/arch/mma_sm89.h/0 | {
"file_path": "include/cutlass/arch/mma_sm89.h",
"repo_id": "include",
"token_count": 4621
} | 12 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Defines a proxy class for storing non-standard 16-bit floating point values with
8 bits of exponent and 7 bit of mantissa.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include "cutlass/floating_point_nvrtc.h"
#else
#include <cmath>
#include <limits>
#include <cstdint>
#include <cstring>
#endif
#include <cuda_bf16.h>
#include "cutlass/cutlass.h"
#include "cutlass/platform/platform.h"
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Floating-point type with 8 bits of exponent and 7 bits of mantissa.
struct alignas(2) bfloat16_t {
//
// Data members
//
/// Storage type
uint16_t storage;
//
// Methods
//
  /// Reinterprets a raw uint16_t bit pattern as a bfloat16_t
CUTLASS_HOST_DEVICE
static bfloat16_t bitcast(uint16_t x) {
bfloat16_t h;
h.storage = x;
return h;
}
private:
struct from_32_bit_integer_t {};
static constexpr from_32_bit_integer_t from_32_bit_integer{};
template<class T>
CUTLASS_HOST_DEVICE
explicit bfloat16_t(from_32_bit_integer_t, T x) {
static_assert(cutlass::platform::is_integral<T>::value && sizeof(T) == 4, "Requires 32-bit integer");
float flt = static_cast<float>(x);
uint32_t bits;
#if defined(__CUDA_ARCH__)
bits = reinterpret_cast<uint32_t &>(flt);
#else
std::memcpy(&bits, &flt, sizeof(bits));
#endif
storage = uint16_t(bits >> 16);
}
public:
/// Default constructor
bfloat16_t() = default;
/// Reinterpret cast from CUDA's __nv_bfloat16 type
CUTLASS_HOST_DEVICE
explicit bfloat16_t(__nv_bfloat16 const & x) {
#if defined(__CUDA_ARCH__)
storage = reinterpret_cast<uint16_t const &>(x);
#else
__nv_bfloat16_raw raw(x);
std::memcpy(&storage, &raw.x, sizeof(storage));
#endif
}
/// Floating-point conversion - round toward nearest
CUTLASS_HOST_DEVICE
explicit bfloat16_t(float x) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) && (__CUDACC_VER_MAJOR__ >= 11)
asm("cvt.rn.bf16.f32 %0, %1;\n" : "=h"(storage) : "f"(x));
#else
uint32_t bits;
#if defined(__CUDA_ARCH__)
bits = reinterpret_cast<uint32_t &>(x);
#else
std::memcpy(&bits, &x, sizeof(bits));
#endif
if ((bits & 0x7f800000) != 0x7f800000) {
bool mantissa_bit = ((bits & (1 << 16)) != 0);
bool round_bit = ((bits & (1 << 15)) != 0);
bool sticky_bit = ((bits & ((1 << 15) - 1)) != 0);
if ((round_bit && sticky_bit) || (round_bit && mantissa_bit)) {
bits += uint32_t(1 << 16);
}
}
else if (bits & ~0xff800000) {
bits = 0x7fffffff;
}
storage = uint16_t((bits >> 16) & 0xffff);
#endif
}
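  // Worked examples of the round-to-nearest-even path above (illustrative):
  //   float bits 0x3F808000 (1 + 1/256): round_bit set, sticky and mantissa bits clear
  //     -> no increment; storage = 0x3F80 (an exact tie rounds to the even value 1.0).
  //   float bits 0x3F818000 (1 + 3/256): round_bit and mantissa bit set
  //     -> increment; storage = 0x3F82 (the tie rounds up to the even value 1.015625).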
/// Floating-point conversion - round toward nearest
CUTLASS_HOST_DEVICE
explicit bfloat16_t(double x): bfloat16_t(float(x)) {
}
/// Integer conversion - round toward nearest
CUTLASS_HOST_DEVICE
explicit bfloat16_t(int x) : bfloat16_t(from_32_bit_integer, x) {}
CUTLASS_HOST_DEVICE
explicit bfloat16_t(uint32_t x) : bfloat16_t(from_32_bit_integer, x) {}
/// Converts to float
CUTLASS_HOST_DEVICE
operator float() const {
unsigned bits = (unsigned(storage) << 16);
#if defined(__CUDA_ARCH__)
return reinterpret_cast<float const &>(bits);
#else
float flt;
std::memcpy(&flt, &bits, sizeof(flt));
return flt;
#endif
}
  /// Converts to double
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(float(*this));
}
/// Converts to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(float(*this));
}
/// Casts to bool
CUTLASS_HOST_DEVICE
explicit operator bool() const {
return (float(*this) != 0.0f);
}
/// Obtains raw bits
CUTLASS_HOST_DEVICE
uint16_t raw() const {
return storage;
}
/// Returns the sign bit
CUTLASS_HOST_DEVICE
bool signbit() const {
return ((raw() & 0x8000) != 0);
}
/// Returns the biased exponent
CUTLASS_HOST_DEVICE
int exponent_biased() const {
return int((raw() >> 7) & 0x0ff);
}
/// Returns the unbiased exponent
CUTLASS_HOST_DEVICE
int exponent() const {
return exponent_biased() - 127;
}
/// Returns the mantissa
CUTLASS_HOST_DEVICE
int mantissa() const {
return int(raw() & 0x7f);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool signbit(cutlass::bfloat16_t const& h) {
return h.signbit();
}
CUTLASS_HOST_DEVICE
cutlass::bfloat16_t abs(cutlass::bfloat16_t const& h) {
return cutlass::bfloat16_t::bitcast(h.raw() & 0x7fff);
}
CUTLASS_HOST_DEVICE
bool isnan(cutlass::bfloat16_t const& h) {
return (h.exponent_biased() == 0x0ff) && h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isfinite(cutlass::bfloat16_t const& h) {
return (h.exponent_biased() != 0x0ff);
}
CUTLASS_HOST_DEVICE
cutlass::bfloat16_t nan_bf16(const char*) {
// NVIDIA canonical NaN
return cutlass::bfloat16_t::bitcast(0x7fff);
}
CUTLASS_HOST_DEVICE
bool isinf(cutlass::bfloat16_t const& h) {
return (h.exponent_biased() == 0x0ff) && !h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isnormal(cutlass::bfloat16_t const& h) {
return h.exponent_biased() && h.exponent_biased() != 0x0ff;
}
CUTLASS_HOST_DEVICE
int fpclassify(cutlass::bfloat16_t const& h) {
int exp = h.exponent_biased();
int mantissa = h.mantissa();
if (exp == 0x0ff) {
if (mantissa) {
return FP_NAN;
}
else {
return FP_INFINITE;
}
}
else if (!exp) {
if (mantissa) {
return FP_SUBNORMAL;
}
else {
return FP_ZERO;
}
}
return FP_NORMAL;
}
CUTLASS_HOST_DEVICE
cutlass::bfloat16_t sqrt(cutlass::bfloat16_t const& h) {
#if defined(__CUDACC_RTC__)
return cutlass::bfloat16_t(sqrtf(float(h)));
#else
return cutlass::bfloat16_t(std::sqrt(float(h)));
#endif
}
CUTLASS_HOST_DEVICE
bfloat16_t copysign(bfloat16_t const& a, bfloat16_t const& b) {
uint16_t a_bits;
uint16_t b_bits;
#if defined(__CUDA_ARCH__)
a_bits = reinterpret_cast<uint16_t const &>(a);
b_bits = reinterpret_cast<uint16_t const &>(b);
#else
std::memcpy(&a_bits, &a, sizeof(a_bits));
std::memcpy(&b_bits, &b, sizeof(b_bits));
#endif
uint16_t a_mag = (a_bits & 0x7fff);
uint16_t b_sign = (b_bits & 0x8000);
uint16_t result = (a_mag | b_sign);
return bfloat16_t::bitcast(result);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Standard Library operations and definitions
//
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace std {
#if !defined(__CUDACC_RTC__)
/// Numeric limits
template <>
struct numeric_limits<cutlass::bfloat16_t> {
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_infinity = true;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
static std::float_denorm_style const has_denorm = std::denorm_present;
static bool const has_denorm_loss = true;
static std::float_round_style const round_style = std::round_to_nearest;
static bool const is_iec559 = false;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = 7;
/// Least positive value
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t min() { return cutlass::bfloat16_t::bitcast(0x01); }
/// Minimum finite value
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t lowest() { return cutlass::bfloat16_t::bitcast(0xff7f); }
/// Maximum finite value
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t max() { return cutlass::bfloat16_t::bitcast(0x7f7f); }
  /// Returns the machine epsilon
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t epsilon() { return cutlass::bfloat16_t::bitcast(0x1000); }
  /// Returns the maximum rounding error
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t round_error() { return cutlass::bfloat16_t(0.5f); }
  /// Returns positive infinity
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t infinity() { return cutlass::bfloat16_t::bitcast(0x7f80); }
  /// Returns a quiet NaN
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t quiet_NaN() { return cutlass::bfloat16_t::bitcast(0x7fff); }
  /// Returns a signaling NaN
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t signaling_NaN() { return cutlass::bfloat16_t::bitcast(0x7fff); }
  /// Returns the smallest positive subnormal value
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t denorm_min() { return cutlass::bfloat16_t::bitcast(0x1); }
};
#endif
} // namespace std
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Arithmetic operators
//
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool operator==(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) == float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator!=(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) != float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) < float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<=(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) <= float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) > float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>=(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) >= float(rhs);
}
CUTLASS_HOST_DEVICE
bfloat16_t operator+(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return bfloat16_t(float(lhs) + float(rhs));
}
CUTLASS_HOST_DEVICE
bfloat16_t operator-(bfloat16_t const& lhs) {
return bfloat16_t(-float(lhs));
}
CUTLASS_HOST_DEVICE
bfloat16_t operator-(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return bfloat16_t(float(lhs) - float(rhs));
}
CUTLASS_HOST_DEVICE
bfloat16_t operator*(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return bfloat16_t(float(lhs) * float(rhs));
}
CUTLASS_HOST_DEVICE
bfloat16_t operator/(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return bfloat16_t(float(lhs) / float(rhs));
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator+=(bfloat16_t & lhs, bfloat16_t const& rhs) {
lhs = bfloat16_t(float(lhs) + float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator-=(bfloat16_t & lhs, bfloat16_t const& rhs) {
lhs = bfloat16_t(float(lhs) - float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator*=(bfloat16_t & lhs, bfloat16_t const& rhs) {
lhs = bfloat16_t(float(lhs) * float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator/=(bfloat16_t & lhs, bfloat16_t const& rhs) {
lhs = bfloat16_t(float(lhs) / float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator++(bfloat16_t & lhs) {
float tmp(lhs);
++tmp;
lhs = bfloat16_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator--(bfloat16_t & lhs) {
float tmp(lhs);
--tmp;
lhs = bfloat16_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t operator++(bfloat16_t & lhs, int) {
bfloat16_t ret(lhs);
float tmp(lhs);
tmp++;
lhs = bfloat16_t(tmp);
return ret;
}
CUTLASS_HOST_DEVICE
bfloat16_t operator--(bfloat16_t & lhs, int) {
bfloat16_t ret(lhs);
float tmp(lhs);
tmp--;
lhs = bfloat16_t(tmp);
return ret;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// User-defined literals
//
CUTLASS_HOST_DEVICE
cutlass::bfloat16_t operator "" _bf16(long double x) {
return cutlass::bfloat16_t(float(x));
}
CUTLASS_HOST_DEVICE
cutlass::bfloat16_t operator "" _bf16(unsigned long long int x) {
return cutlass::bfloat16_t(int(x));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/bfloat16.h/0 | {
"file_path": "include/cutlass/bfloat16.h",
"repo_id": "include",
"token_count": 5260
} | 13 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
This file contains definitions and utility functions for describing convolution problem sizes in terms of
activation (NHWC), filter (KRSC), output (NPQK), padding (pad_h, pad_w), stride (stride_h, stride_w), and
dilation (dilation_h, dilation_w). Furthermore, it defines helper functions to map CUTLASS's implicit gemm
tensor extents, sizes, and data types to the convolution's extents, sizes, and data types.
* Mapping convolutions to Gemm computation *
Cutlass implements convolutions with the Implicit Gemm algorithm. This algorithm performs a gemm
(general matrix-matrix multiply) on the convolution tensors Activation, Filter, and Output.
The underlying gemm operation follows the standard gemm definition:
C = A * B + C
A and B are input matrices
C is the source and output matrix
For the three convolutional operators (Fprop, Dgrad, Wgrad), ImplicitGemm matrices A, B, and C are mapped
to convolution tensors Activation, Filter and Output as described in the table below.
 _______________________ ____________ ____________ _____________
| ConvolutionalOperator |      A     |      B     |      C      |
|_______________________|____________|____________|_____________|
| Fprop                 | Activation | Filter     | Output      |
| Dgrad                 | Output     | Filter     | Activation  |
| Wgrad                 | Output     | Activation | Filter      |
|_______________________|____________|____________|_____________|
In the convolution codebase, DO NOT mix the (A, B, C) naming with (Activation, Filter, Output).
For example, it is confusing and error-prone to document a convolution class or function
as operating on "A, B, Output." Instead, use the mapping functions below,
and adhere to using either A, B, C or Activation, Filter, Output consistently.
Map elements' data types (ImplicitGemm -> Conv): GemmToConvElementMap
Map elements' data types (Conv -> ImplicitGemm): ConvToGemmElementMap
*/
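// A minimal sketch of the Fprop mapping above (illustrative values; the Conv2dProblemSize
// constructor arguments shown here are an assumption, see conv2d_problem_size.h for the exact
// signature). For Fprop the implicit GEMM extents are GEMM_M = N*P*Q, GEMM_N = K, GEMM_K = R*S*C:
//
//   // activation NHWC = {1, 56, 56, 64}, filter KRSC = {128, 3, 3, 64},
//   // pad = {1, 1}, stride = {1, 1}, dilation = {1, 1}  =>  output NPQK = {1, 56, 56, 128}
//   cutlass::conv::Conv2dProblemSize problem_size(
//     {1, 56, 56, 64}, {128, 3, 3, 64}, {1, 1, 1, 1}, {1, 1}, {1, 1});
//   cutlass::gemm::GemmCoord gemm_size =
//     cutlass::conv::implicit_gemm_problem_size(cutlass::conv::Operator::kFprop, problem_size);
//   // gemm_size is (M, N, K) = (1*56*56, 128, 3*3*64) = (3136, 128, 576)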
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm_enumerated_types.h"
#include "cutlass/matrix_coord.h"
namespace cutlass {
namespace conv {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Convolutional operator
enum class Operator {
kFprop,
kDgrad,
kWgrad,
kDeconv
};
/// Distinguishes convolution from cross correlation
enum class Mode {
kCrossCorrelation,
kConvolution
};
/// Selects among several implementation variants trading off performance with simplicity
enum class IteratorAlgorithm {
kAnalytic, ///< functionally correct in all cases but lower performance
kOptimized, ///< optimized for R <= 32, S <= 32 and unity-stride dgrad
kFixedChannels, ///< Analytic algorithm optimized for fixed channel count (C == AccessSize)
kFewChannels, ///< Analytic algorithm optimized for few channels (C divisible by AccessSize)
kFixedStrideDilation ///< Optimized for fixed stride and dilation
};
/// Distinguishes among partial specializations that accelerate certain problems where convolution
/// stride is unit.
enum class StrideSupport {
kStrided, ///< arbitrary convolution stride
kUnity, ///< unit convolution stride
kFixed ///< fixed convolution stride
};
/// Identifies split-K mode
enum class SplitKMode {
kNone,
kSerial,
kParallel
};
/// Identifies group mode
enum class GroupMode {
kNone,
kSingleGroup, ///< One CTA calculates one group or less
kMultipleGroup, ///< One CTA calculates multiple groups
kDepthwise ///< One CTA calculates cta_n groups (problem_size.C == problem_size.K == problem_size.groups)
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Shape of a tensor
template <
int N = 1,
int H = 1,
int W = 1,
int C = 1
>
struct TensorNHWCShape {
static int const kN = N;
static int const kH = H;
static int const kW = W;
static int const kC = C;
static int const kHW = H * W;
static int const kNHW = N * kHW;
static int const kNHWC = N * H * W * C;
static int const kCount = kNHWC;
//
// Static member functions
//
/// Returns a Coord object
CUTLASS_HOST_DEVICE
static Coord<4> toCoord() {
return make_Coord(kN, kH, kW, kC);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Shape of a conv2d stride, which controls how the filter convolves around the input volume
template <
/// Stride in horizontal direction
int u = 1,
/// Stride in vertical direction
int v = 1
>
struct Stride2D {
static int const kU = u;
static int const kV = v;
//
// Static member functions
//
/// Returns a Coord object
CUTLASS_HOST_DEVICE
static Coord<2> toCoord() {
return make_Coord(kU, kV);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace conv
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/convolution.h/0 | {
"file_path": "include/cutlass/conv/convolution.h",
"repo_id": "include",
"token_count": 2239
} | 14 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template wraps the tile access iterator concept to load whole tiles from tensors in
memory used for implicit GEMM convolution.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename TileAccessIterator_>
class TileIterator {
public:
using TileAccessIterator = TileAccessIterator_;
using Shape = typename TileAccessIterator::Shape;
using Element = typename TileAccessIterator::Element;
using Layout = typename TileAccessIterator::Layout;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = typename TileAccessIterator::ThreadMap;
using AccessType = typename TileAccessIterator::AccessType;
using TensorRef = typename TileAccessIterator::TensorRef;
using Index = typename TileAccessIterator::Index;
using LongIndex = typename TileAccessIterator::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = TileAccessIterator::kIteratorAlgorithm;
static StrideSupport const kStrideSupport = TileAccessIterator::kStrideSupport;
using Params = typename TileAccessIterator::Params;
static int const kConvDim = TileAccessIterator::kConvDim;
using ConvProblemSize = typename TileAccessIterator::ConvProblemSize;
static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<
Element,
ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
private:
/// Internal state
TileAccessIterator tile_access_iterator_;
public:
/// Constructor
CUTLASS_HOST_DEVICE
TileIterator(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
tile_access_iterator_(params, problem_size, ptr, thread_idx, threadblock_offset) { }
CUTLASS_HOST_DEVICE
static Params getParams(ConvProblemSize const &problem_size, Layout const &layout) {
return TileAccessIterator::getParams(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
tile_access_iterator_.set_iteration_index(index);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
tile_access_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
TileIterator &operator++() {
tile_access_iterator_.advance();
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
TileIterator operator++(int) {
TileIterator self(*this);
operator++();
return self;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
frag.clear();
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[idx],
tile_access_iterator_.get() + pointer_offset,
tile_access_iterator_.valid()
);
++tile_access_iterator_;
}
}
}
}
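// The fragment is filled in (strided, contiguous, vector) order with the vector index varying
// fastest: access (s, c, v) lands at idx = v + kAccessesPerVector * (c + s * Iterations::kContiguous).
// For example (hypothetical shapes), with kAccessesPerVector = 2 and Iterations::kContiguous = 4,
// access (s=1, c=2, v=1) is written to frag_ptr[1 + 2 * (2 + 1 * 4)] = frag_ptr[13].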
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
tile_access_iterator_.set_iteration_index(0);
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void advance() {
tile_access_iterator_.advance();
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// dispatch to iterator implementation
return TileAccessIterator::can_implement(problem_size);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Strided Dgrad Tile Iterator
template <typename TileAccessIterator_>
class TileIteratorStridedDgrad {
public:
using TileAccessIterator = TileAccessIterator_;
using Shape = typename TileAccessIterator::Shape;
using Element = typename TileAccessIterator::Element;
using Layout = typename TileAccessIterator::Layout;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = typename TileAccessIterator::ThreadMap;
using AccessType = typename TileAccessIterator::AccessType;
using TensorRef = typename TileAccessIterator::TensorRef;
using Index = typename TileAccessIterator::Index;
using LongIndex = typename TileAccessIterator::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = TileAccessIterator::kIteratorAlgorithm;
static StrideSupport const kStrideSupport = TileAccessIterator::kStrideSupport;
using Params = typename TileAccessIterator::Params;
static int const kConvDim = TileAccessIterator::kConvDim;
using ConvProblemSize = typename TileAccessIterator::ConvProblemSize;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<
Element,
ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
private:
/// Internal state
TileAccessIterator tile_access_iterator_;
public:
/// Constructor (output gradient (Dy) OperandA ctor)
CUTLASS_HOST_DEVICE
TileIteratorStridedDgrad(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
tile_access_iterator_(
params,
problem_size,
ptr,
thread_idx,
stride_h_divmod, stride_w_divmod,
start_r, start_s,
threadblock_offset) { }
/// Constructor (filter (w) OperandB ctor)
CUTLASS_HOST_DEVICE
TileIteratorStridedDgrad(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
tile_access_iterator_(params,
problem_size,
ptr,
thread_idx,
start_r, start_s,
threadblock_offset) { }
CUTLASS_HOST_DEVICE
static Params getParams(ConvProblemSize const &problem_size, Layout const &layout) {
return TileAccessIterator::getParams(problem_size, layout);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
tile_access_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
TileIteratorStridedDgrad &operator++() {
tile_access_iterator_.advance();
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
TileIteratorStridedDgrad operator++(int) {
TileIteratorStridedDgrad self(*this);
operator++();
return self;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
frag.clear();
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[c + s * ThreadMap::Iterations::kContiguous],
tile_access_iterator_.get() + pointer_offset,
tile_access_iterator_.valid()
);
++tile_access_iterator_;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
tile_access_iterator_.set_iteration_index(0);
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void advance() {
tile_access_iterator_.advance();
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// dispatch to iterator implementation
return TileAccessIterator::can_implement(problem_size);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv2d_tile_iterator.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv2d_tile_iterator.h",
"repo_id": "include",
"token_count": 3649
} | 15 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dWgradOutputGradientTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
static_assert(sizeof_bits<Element>::value >= 8,
"WGRAD requires elements of size 8b or greater.");
//
// Parameters structure
//
struct Params {
Layout layout;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
Conv3dProblemSize const &problem_size,
Layout const &layout
): layout(layout) {
}
};
private:
Params const ¶ms_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
int filter_k_[ThreadMap::Iterations::kContiguous];
int offset_nzpq_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv3dWgradOutputGradientTileAccessIteratorAnalytic(
Params const ¶ms,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
// initialize filter_k for every contiguous iteration
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
filter_k_[c] = threadblock_offset.row() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
}
// initialize n, z, p, q offset for every strided iteration
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_nzpq_[s] = threadblock_offset.column() + thread_coord.strided()
+ s * ThreadMap::Delta::kStrided;
}
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next GEMM-K offset (offset_nzpq_) in GEMM-A by a CTA-K tile
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_nzpq_[s] += Shape::kColumn * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the output gradient tensor Dy that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int nzpq = offset_nzpq_[iteration_strided_];
int n = nzpq / (problem_size_.Z * problem_size_.P * problem_size_.Q);
int residual = nzpq % (problem_size_.Z * problem_size_.P * problem_size_.Q);
int z = residual / (problem_size_.P * problem_size_.Q);
residual = residual % (problem_size_.P * problem_size_.Q);
int p = residual / problem_size_.Q;
int q = residual % problem_size_.Q;
return TensorCoord(n, z, p, q, filter_k_[iteration_contiguous_]);
}
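// A worked instance of the decomposition above (illustrative numbers): with Z = 2, P = 4, Q = 8,
// the linear offset nzpq = 77 splits as n = 77 / 64 = 1, residual = 13, z = 13 / 32 = 0,
// p = 13 / 8 = 1, q = 13 % 8 = 5, i.e. the coordinate (n, z, p, q) = (1, 0, 1, 5).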
/// Returns true if the current coordinate is within the output gradient tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N &&
coord.d() < problem_size_.Z &&
coord.h() < problem_size_.P &&
coord.w() < problem_size_.Q &&
coord.c() < problem_size_.K;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dWgradOutputGradientTileAccessIteratorAnalytic &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv3d_wgrad_output_gradient_tile_access_iterator_analytic.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv3d_wgrad_output_gradient_tile_access_iterator_analytic.h",
"repo_id": "include",
"token_count": 2865
} | 16 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implements several possible threadblock-swizzling functions mapping blockIdx to
Convolution problems.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/platform/platform.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
static int get_strided_dgrad_tile_m(
cutlass::conv::Conv2dProblemSize const &problem_size,
int tile_size_m) {
// CTAs in M dimension per starting filter position
int tile_m_per_filter = strided_dgrad_tile_m_per_filter(problem_size, tile_size_m);
// Inflate the number of CTAs in the M dimension to cover every starting filter position, even those that
// may fall outside the valid MMA (Dy * w) but are needed to apply the epilogue (beta * Dx_source)
// and point-wise fusion
int tile_m = tile_m_per_filter * int(problem_size.stride().product());
// There is a possible performance optimization here that yields up to a 2x speedup over the current
// CUTLASS strided dgrad performance when stride > filter, e.g., stride={2x2} and filter={1x1}
//
// * Optimization *
// Only launch CTAs in M dimension which contribute to a row in Dx output
//
//
// * Constraints *
// (A) stride <= filter, for example, stride={2x2} and filter={3x3}:
// - (A.1): There are no constraints for this case and the optimization does
// not affect this case's functionality or performance.
// (B) stride > filter, for example, stride={2x2} and filter={1x1}:
// - (B.1): Dx output tensor should be zero initialized
// - (B.2): The kernel epilogue cannot apply beta. Thus, beta should be zero
return tile_m;
}
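// For example, if strided_dgrad_tile_m_per_filter() yields 4 CTAs per starting filter position
// and the convolution stride is {2, 2}, then tile_m = 4 * (2 * 2) = 16 CTAs are launched in the
// M dimension (the numbers are illustrative only).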
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for strided dgrad convolution
struct StridedDgradHorizontalThreadblockSwizzle :
public gemm::threadblock::GemmHorizontalThreadblockSwizzle {
using Base = gemm::threadblock::GemmHorizontalThreadblockSwizzle;
CUTLASS_HOST_DEVICE
StridedDgradHorizontalThreadblockSwizzle() { }
/// Returns the shape of the problem in units of logical tiles
/// For ImplicitGemmConvolution Conv2d problem size: conv_operator(NPQK, NHWC, KRSC)
CUTLASS_HOST_DEVICE
static gemm::GemmCoord get_tiled_shape(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem_size,
gemm::GemmCoord tile_size,
int split_k_slices) {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
// compute number of tiles in m dimension
int tile_m = get_strided_dgrad_tile_m(problem_size, tile_size.m());
// compute number of tiles in n dimension
int tile_n = (implicit_gemm_problem_size.n() + tile_size.n() - 1) / tile_size.n();
return gemm::GemmCoord(
tile_m,
tile_n,
split_k_slices);
}
/// Returns the shape of the problem in units of logical tiles
/// For GEMM problem size (MxNxK) (Do not use base class get_tiled_shape())
private:
using Base::get_tiled_shape;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for strided dgrad convolution
template <int N = 1>
struct StridedDgradIdentityThreadblockSwizzle :
public gemm::threadblock::GemmIdentityThreadblockSwizzle<N> {
using Base = gemm::threadblock::GemmIdentityThreadblockSwizzle<N>;
CUTLASS_HOST_DEVICE
StridedDgradIdentityThreadblockSwizzle() { }
/// Returns the shape of the problem in units of logical tiles
/// For ImplicitGemmConvolution Conv2d problem size: conv_operator(NPQK, NHWC, KRSC)
CUTLASS_HOST_DEVICE
static gemm::GemmCoord get_tiled_shape(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem_size,
gemm::GemmCoord tile_size,
int split_k_slices) {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
// compute number of tiles in m dimension
int tile_m = get_strided_dgrad_tile_m(problem_size, tile_size.m());
// compute number of tiles in n dimension
int tile_n = (implicit_gemm_problem_size.n() + tile_size.n() - 1) / tile_size.n();
return gemm::GemmCoord(
tile_m,
tile_n,
split_k_slices);
}
/// Returns the shape of the problem in units of logical tiles
/// For GEMM problem size (MxNxK) (Do not use base class get_tiled_shape())
private:
using Base::get_tiled_shape;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for GEMMs
template <int N = 1, int Output_N = 1, int Output_P = 1, int Output_Q = 1>
struct DepthwiseDirect2dConvIdentityThreadblockSwizzle
: public gemm::threadblock::GemmIdentityThreadblockSwizzle<N> {
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvIdentityThreadblockSwizzle() {}
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
static gemm::GemmCoord get_tiled_shape(cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem_size,
gemm::GemmCoord tile_size,
int split_k_slices) {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
return gemm::GemmCoord(1,
(implicit_gemm_problem_size.n() + tile_size.n() - 1) / tile_size.n(),
split_k_slices);
}
};
} // namespace threadblock
} // namespace conv
} // namespace cutlass
| include/cutlass/conv/threadblock/threadblock_swizzle.h/0 | {
"file_path": "include/cutlass/conv/threadblock/threadblock_swizzle.h",
"repo_id": "include",
"token_count": 2604
} | 17 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/detail/dependent_false.hpp"
#include "cutlass/epilogue/fusion/callbacks.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue::collective {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Used to specify epilogue subtile shape or dispatch to automatic computation of subtile shape
struct EpilogueTileAuto {};
// Used to let the builder pick the epilogue schedule automatically.
// Can be overridden with kernel schedule tags in cutlass/gemm/dispatch_policy.hpp
struct EpilogueScheduleAuto {};
struct EpilogueIm2ColScheduleAuto {};
template <
class ArchTag,
class OpClass,
class TileShape_MNK,
class ClusterShape_MNK,
class EpilogueTileType,
class ElementAccumulator,
class ElementCompute,
class ElementC,
class GmemLayoutTagC,
int AlignmentC,
class ElementD,
class GmemLayoutTagD,
int AlignmentD,
class EpilogueScheduleType,
class FusionOpOrCallbacks = cutlass::epilogue::fusion::LinearCombination<ElementD,ElementCompute,ElementC,ElementCompute>,
class Enable = void
>
struct CollectiveBuilder {
static_assert(cutlass::detail::dependent_false<ArchTag>,
"Could not build a collective epilogue for given parameters.");
};
// helper sub-builder for epilogue fusion callbacks (for internal use by CollectiveBuilder only)
namespace detail {
// callbacks builder with operation tag
template<
class DispatchPolicy,
class FusionOp,
class TileShape_MNK,
class EpilogueTile_MN,
class ElementAccumulator,
class = void
>
struct CallbacksBuilder {
using Callbacks = fusion::FusionCallbacks<DispatchPolicy, FusionOp, TileShape_MNK, EpilogueTile_MN>;
};
// callbacks builder with callbacks passthrough
template <
class DispatchPolicy,
class FusionCallbacks,
class TileShape_MNK,
class EpilogueTile_MN,
class ElementAccumulator
>
struct CallbacksBuilder<
DispatchPolicy,
FusionCallbacks,
TileShape_MNK,
EpilogueTile_MN,
ElementAccumulator,
cute::enable_if_t<not is_base_of_v<fusion::FusionOperation, FusionCallbacks>>
> {
using Callbacks = FusionCallbacks;
};
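// Dispatch note: the primary CallbacksBuilder above is selected when FusionOpOrCallbacks is an
// operation tag derived from fusion::FusionOperation (e.g. the default LinearCombination), in
// which case it is wrapped in fusion::FusionCallbacks; the specialization directly above is
// selected when the user already supplies a callbacks type, which is passed through unchanged.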
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::epilogue::collective
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "builders/sm90_builder.inl"
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/collective/collective_builder.hpp/0 | {
"file_path": "include/cutlass/epilogue/collective/collective_builder.hpp",
"repo_id": "include",
"token_count": 1142
} | 18 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Visitor tree store operations for the sm90 TMA warp-specialized (ws) epilogue
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/workspace.h"
#include "cute/tensor.hpp"
#include "sm90_visitor_tma_warpspecialized.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue::fusion {
using namespace cute;
using namespace detail;
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Elementwise Store Operations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
int Stages,
class EpilogueTile,
class Element,
FloatRoundStyle RoundStyle,
class StrideMNL,
class SmemLayoutAtom,
class CopyOpR2S,
int Alignment = 128 / sizeof_bits_v<Element>,
bool EnableNullptr = true // Noop on nullptr params
>
struct Sm90AuxStore {
using ElementAux = Element;
static_assert(Alignment * sizeof_bits_v<Element> % 128 == 0, "sub-16B alignment not supported yet");
constexpr static bool is_m_major = epilogue::collective::detail::is_m_major<StrideMNL>();
// Find the max contiguous layout usable by TMA (if EpilogueTile is a non-compact tiler)
using SmemShapeTma = decltype(make_shape(
max_common_vector(make_layout(get<0>(EpilogueTile{})),make_layout(get<0>(EpilogueTile{}))),
max_common_vector(make_layout(get<1>(EpilogueTile{})),make_layout(get<1>(EpilogueTile{})))));
using SmemLayoutTma = decltype(tile_to_shape(
SmemLayoutAtom{}, SmemShapeTma{},
cute::conditional_t<is_m_major, Step<_2,_1>, Step<_1,_2>>{} ));
using SmemLayout = decltype(tile_to_shape(
SmemLayoutTma{},
make_shape(size<0>(shape(EpilogueTile{})), size<1>(shape(EpilogueTile{})), Int<Stages>{}),
cute::conditional_t<is_m_major, Step<_2,_1,_3>, Step<_1,_2,_3>>{} ));
struct SharedStorage {
alignas(cutlass::detail::alignment_for_swizzle(SmemLayout{}))
array_aligned<Element, size(SmemLayout{})> smem_aux;
};
struct Arguments {
Element* ptr_aux = nullptr;
StrideMNL dAux = {};
};
struct Params {
using TMA_Aux = decltype(make_tma_copy(
SM90_TMA_STORE{},
make_tensor(static_cast<Element*>(nullptr), repeat_like(StrideMNL{}, int32_t(0)), StrideMNL{}),
SmemLayoutTma{}));
TMA_Aux tma_store_aux;
bool is_nullptr = false;
};
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
// Optionally append 1s until the problem shape is rank-4, in case it is only rank-3 (MNK)
auto problem_shape_mnkl = append<4>(problem_shape, 1);
auto [M, N, K, L] = problem_shape_mnkl;
bool is_nullptr = false;
if constexpr (EnableNullptr) {
is_nullptr = args.ptr_aux == nullptr;
}
typename Params::TMA_Aux tma_store_aux;
if (not is_nullptr) {
Tensor tensor_aux = make_tensor(args.ptr_aux, make_layout(make_shape(M,N,L), args.dAux));
tma_store_aux = make_tma_copy(SM90_TMA_STORE{}, tensor_aux, SmemLayoutTma{});
}
return {tma_store_aux, is_nullptr};
}
template <class ProblemShape>
static bool
can_implement(ProblemShape const& problem_shape, Arguments const& args) {
return true;
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
return 0;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
return cutlass::Status::kSuccess;
}
CUTLASS_HOST_DEVICE
Sm90AuxStore() { }
CUTLASS_HOST_DEVICE
Sm90AuxStore(Params const& params, SharedStorage const& shared_storage)
: params_ptr(¶ms),
smem_aux(const_cast<Element*>(shared_storage.smem_aux.data())) { }
Params const* params_ptr;
Element* smem_aux;
CUTLASS_DEVICE bool
is_producer_load_needed() const {
return false;
}
CUTLASS_DEVICE bool
is_C_load_needed() const {
return false;
}
template <class... Args>
CUTLASS_DEVICE auto
get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) {
return EmptyProducerLoadCallbacks{};
}
template <
class RTensor,
class TiledR2S,
class STensorR2S,
class STensorS2G,
class GTensorS2G
>
struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks {
CUTLASS_DEVICE
ConsumerStoreCallbacks(
RTensor&& tC_rAux,
TiledR2S tiled_r2s,
STensorR2S&& tRS_sAux,
STensorS2G&& bSG_sAux,
GTensorS2G&& bSG_gAux,
Params const* params_ptr)
: tiled_r2s(tiled_r2s),
tC_rAux(cute::forward<RTensor>(tC_rAux)),
tRS_sAux(cute::forward<STensorR2S>(tRS_sAux)),
bSG_sAux(cute::forward<STensorS2G>(bSG_sAux)),
bSG_gAux(cute::forward<GTensorS2G>(bSG_gAux)),
params_ptr(params_ptr) {}
TiledR2S tiled_r2s;
RTensor tC_rAux; // (CPY,CPY_M,CPY_N)
STensorR2S tRS_sAux; // (R2S,R2S_M,R2S_N,PIPE)
STensorS2G bSG_sAux; // (S2G,S2G_M,S2G_N,PIPE)
GTensorS2G bSG_gAux; // (S2G,S2G_M,S2G_N,EPI_M,EPI_N)
Params const* params_ptr;
template <typename ElementAccumulator, typename ElementInput, int FragmentSize>
CUTLASS_DEVICE auto
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n,
Array<ElementInput, FragmentSize> const& frg_input) {
using ConvertInput = NumericArrayConverter<Element, ElementInput, FragmentSize, RoundStyle>;
ConvertInput convert_input{};
Tensor tC_rAux_frg = recast<Array<Element, FragmentSize>>(coalesce(tC_rAux)); // (EPI_V)
tC_rAux_frg(epi_v) = convert_input(frg_input);
return frg_input;
}
CUTLASS_DEVICE void
postreduce(int epi_m, int epi_n, int store_iteration, bool issue_smem_store) {
if constexpr (EnableNullptr) {
if (params_ptr->is_nullptr) {
return;
}
}
using RLayoutR2S = decltype(cute::layout(TiledR2S{}.get_slice(0).retile_S(RTensor{})));
Tensor tRS_rAux = make_tensor(tC_rAux.data(), RLayoutR2S{}); // (R2S,R2S_M,R2S_N)
if (issue_smem_store) {
int store_pipe_index = store_iteration % Stages;
copy(tiled_r2s, tRS_rAux, tRS_sAux(_,_,_,store_pipe_index));
}
}
CUTLASS_DEVICE void
tma_store(int epi_m, int epi_n, int store_iteration, bool issue_tma_store) {
if constexpr (EnableNullptr) {
if (params_ptr->is_nullptr) {
return;
}
}
if (issue_tma_store) {
// Issue the TMA store
int store_pipe_index = store_iteration % Stages;
copy(params_ptr->tma_store_aux, bSG_sAux(_,_,_,store_pipe_index), bSG_gAux(_,_,_,epi_m,epi_n));
}
}
};
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
auto [M, N, K, L] = args.problem_shape_mnkl;
auto [m, n, k, l] = args.tile_coord_mnkl;
Tensor mAux = params_ptr->tma_store_aux.get_tma_tensor(make_shape(M,N,L)); // (M,N,L)
Tensor gAux = local_tile(mAux, take<0,2>(args.tile_shape_mnk), make_coord(m,n,l)); // (CTA_M,CTA_N)
Tensor tC_gAux = sm90_partition_for_epilogue<ReferenceSrc>( // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
gAux, args.epi_tile, args.tiled_copy, args.thread_idx);
Tensor tC_rAux = make_tensor<Element>(take<0,3>(shape(tC_gAux))); // (CPY,CPY_M,CPY_N)
Tensor sAux_epi = cute::as_position_independent_swizzle_tensor(
make_tensor(make_smem_ptr(smem_aux), SmemLayout{})); // (EPI_TILE_M,EPI_TILE_N,PIPE)
Tensor gAux_epi = flat_divide(gAux, args.epi_tile); // (EPI_TILE_M,EPI_TILE_N,EPI_M,EPI_N)
auto tiled_r2s = conditional_return<ReferenceSrc>(
make_tiled_copy_S(Copy_Atom<CopyOpR2S,Element>{}, args.tiled_copy),
make_tiled_copy_D(Copy_Atom<CopyOpR2S,Element>{}, args.tiled_copy)
);
auto tRS_sAux = tiled_r2s.get_slice(args.thread_idx).partition_D(sAux_epi); // (R2S,R2S_M,R2S_N,PIPE)
ThrCopy thrblk_s2g = params_ptr->tma_store_aux.get_slice(_0{});
Tensor bSG_sAux = thrblk_s2g.partition_S(sAux_epi); // (TMA,TMA_M,TMA_N,PIPE)
Tensor bSG_gAux = thrblk_s2g.partition_D(gAux_epi); // (TMA,TMA_M,TMA_N,EPI_M,EPI_N)
return ConsumerStoreCallbacks<decltype(tC_rAux), decltype(tiled_r2s), decltype(tRS_sAux), decltype(bSG_sAux), decltype(bSG_gAux)>(
cute::move(tC_rAux),
tiled_r2s,
cute::move(tRS_sAux),
cute::move(bSG_sAux),
cute::move(bSG_gAux),
params_ptr);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Reduction Store Operations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
// Scalar reduction
template <
template <class> class RegReduceFn,
template <class> class GmemReduceFn,
class ElementOutput,
class ElementCompute,
FloatRoundStyle RoundStyle,
class StrideMNL = Stride<_0,_0,_0>,
bool EnableNullptr = true // Noop on nullptr params
>
struct Sm90ScalarReduction {
private:
static_assert(
(cute::is_same_v<StrideMNL, Stride<_0,_0, _0>>) || // scalar reduction, e.g. tensor max element
(cute::is_same_v<StrideMNL, Stride<_0,_0, _1>>) || // batched scalar reduction, e.g. per-batch max element
(cute::is_same_v<StrideMNL, Stride<_0,_0,int>>));
static constexpr bool IsAtomic = is_atomic<GmemReduceFn<ElementCompute>>::value;
static_assert(IsAtomic, "non-atomic scalar reduction not supported yet");
public:
struct SharedStorage { };
struct Arguments {
ElementOutput* ptr_scalar = nullptr;
ElementCompute reduction_identity = ElementCompute(0);
StrideMNL dScalar = {};
};
using Params = Arguments;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
return args;
}
template <class ProblemShape>
static bool
can_implement(ProblemShape const& problem_shape, Arguments const& args) {
return true;
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
return 0;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
if constexpr (IsAtomic) {
auto [M, N, K, L] = problem_shape;
Layout mScalar_layout = make_layout(make_shape(M,N,L), args.dScalar);
if (args.ptr_scalar != nullptr) {
return fill_workspace(args.ptr_scalar, ElementOutput(args.reduction_identity), cosize(mScalar_layout), stream, cuda_adapter);
}
}
return cutlass::Status::kSuccess;
}
CUTLASS_DEVICE bool
is_producer_load_needed() const {
return false;
}
CUTLASS_DEVICE bool
is_C_load_needed() const {
return false;
}
CUTLASS_HOST_DEVICE
Sm90ScalarReduction() { }
CUTLASS_HOST_DEVICE
Sm90ScalarReduction(Params const& params, SharedStorage const& shared_storage)
: params(params) { }
Params const params;
template <class... Args>
CUTLASS_DEVICE auto
get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) {
return EmptyProducerLoadCallbacks{};
}
template<class CTensor, class ThrResidue>
struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks {
CUTLASS_DEVICE
ConsumerStoreCallbacks(
int l_coord,
CTensor tCcScalar,
ThrResidue residue_tCcScalar,
Params const& params)
: scalar(params.reduction_identity),
l_coord(l_coord),
tCcScalar(tCcScalar),
residue_tCcScalar(residue_tCcScalar),
params(params) {}
ElementCompute scalar;
int l_coord;
CTensor tCcScalar; // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
ThrResidue residue_tCcScalar;
Params params;
template <typename ElementAccumulator, typename ElementInput, int FragmentSize>
CUTLASS_DEVICE auto
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n,
Array<ElementInput, FragmentSize> const& frg_input) {
if constexpr (EnableNullptr) {
if (params.ptr_scalar == nullptr) {
return frg_input;
}
}
using ConvertInput = NumericArrayConverter<ElementCompute, ElementInput, FragmentSize, RoundStyle>;
using ReduceInput = RegReduceFn<ElementCompute>;
ConvertInput convert_input{};
ReduceInput reduce_input{};
Array frg_I = convert_input(frg_input);
Tensor tCcScalar_mn = tCcScalar(_,_,_,epi_m,epi_n);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < FragmentSize; ++i) {
if (elem_less(tCcScalar_mn(epi_v * FragmentSize + i), residue_tCcScalar)) {
scalar = reduce_input(scalar, frg_I[i]);
}
}
return frg_input;
}
CUTLASS_DEVICE void
end() {
if constexpr (EnableNullptr) {
if (params.ptr_scalar == nullptr) {
return;
}
}
using ConvertI = NumericConverter<ElementOutput, ElementCompute, RoundStyle>;
using ReduceInput = GmemReduceFn<ElementOutput>;
ConvertI convert_I{};
ReduceInput reduce_input{};
ElementOutput* ptr_scalar = params.ptr_scalar + l_coord * get<2>(params.dScalar);
reduce_input(ptr_scalar, convert_I(scalar));
}
};
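// To summarize the callbacks above: visit() folds every in-bounds element visited by this thread
// into the thread-local `scalar` using RegReduceFn, and end() commits that per-thread partial to
// global memory with the atomic GmemReduceFn, offsetting ptr_scalar by the batch index l_coord
// for the batched-scalar stride case.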
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
return ConsumerStoreCallbacks<decltype(args.tCcD), decltype(args.residue_tCcD)>(
get<3>(args.tile_coord_mnkl), args.tCcD, args.residue_tCcD, params);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Row vector reduction
template <
template <class> class RegReduceFn,
template <class> class ShuffleReduceFn,
template <class> class GmemReduceFn,
int Stages,
class CtaTileShapeMNK,
class ElementOutput,
class ElementCompute,
FloatRoundStyle RoundStyle,
class StrideMNL = Stride<_0,_1,_0>,
int Alignment = 128 / sizeof_bits_v<ElementOutput>,
bool EnableNullptr = true, // Noop on nullptr params
// If this is false, ptr_row is assumed to point to a compact n-major (ceil_div(M,CTA_M), round_nearest(N,CTA_N), L)
// tensor of ElementCompute. It is the user's responsibility to reduce this to a (N, L) tensor of ElementOutput
bool FinalReduction = true,
// False means skip OOB predication if OOB inputs are known to be the reduction identity
bool VisitCheckOOB = true
>
struct Sm90RowReduction {
private:
static_assert(Stages == 0, "Smem usage not supported yet");
static_assert(Alignment * sizeof_bits_v<ElementOutput> % 128 == 0, "sub-16B alignment not supported yet");
static_assert(
(cute::is_same_v<StrideMNL, Stride<_0,_1, _0>>) || // row vector reduction, e.g. per-col sum over all batches
(cute::is_same_v<StrideMNL, Stride<_0,_1,int>>)); // batched row vector reduction, e.g. per-col sum per batch
static constexpr bool IsAtomic = is_atomic<GmemReduceFn<ElementCompute>>::value;
static_assert(not (IsAtomic && not FinalReduction), "atomic reduction must be final");
public:
struct SharedStorage { };
struct Arguments {
void* ptr_row = nullptr; // ElementOutput* if FinalReduction, else ElementCompute*
ElementCompute reduction_identity = 0;
StrideMNL dRow = {};
};
struct Params {
void* ptr_row = nullptr;
ElementCompute reduction_identity = 0;
StrideMNL dRow = {};
ElementCompute* reduction_buffer = nullptr;
int* tile_counters = nullptr;
};
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
ElementCompute* reduction_buffer;
int* tile_counters = nullptr;
if constexpr (IsAtomic) {
reduction_buffer = nullptr;
}
else if constexpr (FinalReduction) {
auto [M, N, K, L] = problem_shape;
auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{};
size_t tile_counters_offset = product(ceil_div(make_shape(size<>(M), size<>(N), L), make_shape(tile_M, tile_N))) * tile_N * sizeof(ElementCompute);
tile_counters_offset = round_nearest(tile_counters_offset, MinWorkspaceAlignment);
reduction_buffer = reinterpret_cast<ElementCompute*>(workspace);
tile_counters = reinterpret_cast<int*>(reinterpret_cast<uint8_t*>(workspace) + tile_counters_offset);
}
else {
reduction_buffer = reinterpret_cast<ElementCompute*>(args.ptr_row);
}
return {
args.ptr_row,
args.reduction_identity,
args.dRow,
reduction_buffer,
tile_counters
};
}
template <class ProblemShape>
static bool
can_implement(ProblemShape const& problem_shape, Arguments const& args) {
return true;
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
if constexpr (IsAtomic || not FinalReduction) {
return 0;
}
size_t workspace_size = 0;
auto [M, N, K, L] = problem_shape;
auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{};
// Increment by size of reduction buffer
workspace_size += product(ceil_div(make_shape(size<>(M),size<>(N),L), make_shape(tile_M, tile_N))) * tile_N * sizeof(ElementCompute);
// Align and increment by size of tile counters
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
workspace_size += cute::ceil_div(size<>(N), tile_N) * sizeof(int);
return workspace_size;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
if constexpr (IsAtomic) {
auto [M, N, K, L] = problem_shape;
Layout mRow_layout = make_layout(make_shape(size<>(M),size<>(N),size<>(L)), args.dRow);
if (args.ptr_row != nullptr) {
return fill_workspace(args.ptr_row, ElementOutput(args.reduction_identity), cosize(mRow_layout), stream, cuda_adapter);
}
return Status::kSuccess;
}
else if constexpr (FinalReduction) {
auto [M, N, K, L] = problem_shape;
auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{};
size_t tile_counters_offset = product(ceil_div(make_shape(size<>(M),size<>(N),L), make_shape(tile_M, tile_N))) * tile_N * sizeof(ElementCompute);
tile_counters_offset = round_nearest(tile_counters_offset, MinWorkspaceAlignment);
int* tile_counters = reinterpret_cast<int*>(reinterpret_cast<uint8_t*>(workspace) + tile_counters_offset);
size_t tile_counters_size = cute::ceil_div(size<>(N), tile_N) * sizeof(int);
return zero_workspace(tile_counters, tile_counters_size, stream, cuda_adapter);
}
else {
return Status::kSuccess;
}
}
CUTLASS_DEVICE bool
is_producer_load_needed() const {
return false;
}
CUTLASS_DEVICE bool
is_C_load_needed() const {
return false;
}
CUTLASS_HOST_DEVICE
Sm90RowReduction() { }
CUTLASS_HOST_DEVICE
Sm90RowReduction(Params const& params, SharedStorage const& shared_storage)
: params(params) { }
Params params;
template <class... Args>
CUTLASS_DEVICE auto
get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) {
return EmptyProducerLoadCallbacks{};
}
template<class ArgsTuple>
struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks {
CUTLASS_DEVICE
ConsumerStoreCallbacks(ArgsTuple&& args_tuple, Params const& params)
: args_tuple(cute::forward<ArgsTuple>(args_tuple)),
params(params) {}
ArgsTuple args_tuple;
Params const& params;
bool do_final_reduction = false;
template <typename ElementAccumulator, typename ElementInput, int FragmentSize>
CUTLASS_DEVICE auto
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n,
Array<ElementInput, FragmentSize> const& frg_input) {
if constexpr (EnableNullptr) {
if (params.ptr_row == nullptr) {
return frg_input;
}
}
auto& [ref_src, tCrRow, tCcRow, gRow_l, cRow, gBuf_ml, sBuf_layout,
lane_layout_MN, lane_mn, warp_layout_MN, warp_mn,
tile_coord_mnkl, residue_cRow, residue_tCcRow, epi_tile, tiled_copy, thread_idx] = args_tuple;
Tensor tCrRow_mn = tCrRow(_,_,_,epi_m,epi_n);
Tensor tCcRow_mn = tCcRow(_,_,_,epi_m,epi_n);
using ConvertInput = NumericArrayConverter<ElementCompute, ElementInput, FragmentSize, RoundStyle>;
using ReduceInput = RegReduceFn<ElementCompute>;
ConvertInput convert_input{};
ReduceInput reduce_input{};
Array frg_I = convert_input(frg_input);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < FragmentSize; ++i) {
if (!VisitCheckOOB || elem_less(tCcRow_mn(epi_v * FragmentSize + i), residue_tCcRow)) {
ElementCompute& tCrRow_vmn = tCrRow_mn(epi_v * FragmentSize + i);
tCrRow_vmn = reduce_input(tCrRow_vmn, frg_I[i]);
}
}
return frg_input;
}
template <class STensor, class SyncFn, class VTensor>
CUTLASS_DEVICE void
reduce(STensor&& smem_buffer, SyncFn const& sync_fn, int epi_m, int epi_n, bool is_last_iteration, VTensor visit_results) {
if (not is_last_iteration) {
return;
}
auto& [ref_src, tCrRow, tCcRow, gRow_l, cRow, gBuf_ml, sBuf_layout,
lane_layout_MN, lane_mn, warp_layout_MN, warp_mn,
tile_coord_mnkl, residue_cRow, residue_tCcRow, epi_tile, tiled_copy, thread_idx] = args_tuple;
auto [m, n, k, l] = tile_coord_mnkl;
constexpr bool ReferenceSrc = decltype(ref_src)::value;
if constexpr (EnableNullptr) {
if (params.ptr_row == nullptr) {
return;
}
}
// fully OOB CTA in partially OOB cluster
if (not elem_less(cRow(_0{},_0{}), residue_cRow)) {
return;
}
//
// 1. Warp shuffle reduction
//
using FragmentShuffle = Array<ElementCompute, sizeof(uint64_t) / sizeof(ElementCompute)>;
using ReduceShuffle = ShuffleReduceFn<FragmentShuffle>;
ReduceShuffle reduce_shuffle{};
Tensor tCrRow_frg = recast<FragmentShuffle>(filter(tCrRow));
CUTLASS_PRAGMA_UNROLL
for (int reduction_rows = size<0>(lane_layout_MN) / 2; reduction_rows > 0; reduction_rows /= 2) {
CUTLASS_PRAGMA_UNROLL
for (int frg_idx = 0; frg_idx < size(tCrRow_frg); ++frg_idx) {
uint64_t frg_shfl = reinterpret_cast<uint64_t&>(tCrRow_frg(frg_idx));
frg_shfl = __shfl_down_sync(0xFFFFFFFF, frg_shfl, lane_layout_MN(reduction_rows, _0{}));
tCrRow_frg(frg_idx) = reduce_shuffle(tCrRow_frg(frg_idx), reinterpret_cast<FragmentShuffle&>(frg_shfl));
}
}
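// After the shuffle tree above, only lanes in the first row of the lane layout
// (i.e. get<0>(lane_mn) == 0) hold values reduced across the warp's M extent.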
bool is_reduced_lane = get<0>(lane_mn) == 0;
//
// 2. Atomic reduction
//
if constexpr (IsAtomic) {
// Filter so we don't issue redundant copies over stride-0 modes
Tensor tCrRow_flt = filter_zeros(tCrRow);
Tensor tCcRow_flt = make_tensor(tCcRow.data(), make_layout(tCrRow_flt.shape(), tCcRow.stride()));
Tensor tCgRow = sm90_partition_for_epilogue<ReferenceSrc>(gRow_l(_,_,l), epi_tile, tiled_copy, thread_idx);
Tensor tCgRow_flt = filter_zeros(tCgRow);
// NOTE: atomic reduction is performed in the output type
using ConvertOutput = NumericConverter<ElementOutput, ElementCompute, RoundStyle>;
using ReduceOutput = GmemReduceFn<ElementOutput>;
ConvertOutput convert_output{};
ReduceOutput reduce_output{};
if (is_reduced_lane) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size(tCrRow_flt); ++i) {
if (elem_less(tCcRow_flt(i), residue_tCcRow)) {
reduce_output(&tCgRow_flt(i), convert_output(tCrRow_flt(i)));
}
}
}
sync_fn();
}
//
// 2. One warp in M, skip threadblock smem reduction
//
else if constexpr (decltype(size<0>(warp_layout_MN))::value <= 1) {
// Dump warp reduction to gmem workspace
using ElementGmem = cute::conditional_t<FinalReduction, ElementCompute volatile, ElementCompute>;
Tensor tCgBuf = sm90_partition_for_epilogue<ReferenceSrc>(gBuf_ml(_,_,m,l), epi_tile, tiled_copy, thread_idx);
if (is_reduced_lane) {
// Filter so we don't issue redundant copies over stride-0 modes
// (only works if 0-strides are in same location, which is by construction)
copy_aligned(filter(tCrRow), recast<ElementGmem>(filter(tCgBuf)));
}
sync_fn();
}
//
// 2. Multiple warps in M, do threadblock smem reduction
//
else {
Tensor sBuf = make_tensor(make_smem_ptr<ElementCompute>(raw_pointer_cast(smem_buffer.data())), sBuf_layout);
static_assert(decltype(cosize(sBuf.layout()))::value * sizeof(ElementCompute) <=
decltype(cosize(smem_buffer.layout()))::value * sizeof(typename remove_cvref_t<STensor>::value_type),
"smem reduction buffer not large enough, use a larger epilogue tile");
sync_fn();
// Dump warp reduction to smem workspace
Tensor tCsBuf = sm90_partition_for_epilogue<ReferenceSrc>(sBuf(_,_,get<0>(warp_mn)), epi_tile, tiled_copy, thread_idx);
if (is_reduced_lane) {
// Filter so we don't issue redundant copies over stride-0 modes
// (only works if 0-strides are in same location, which is by construction)
copy_aligned(filter(tCrRow), filter(tCsBuf));
}
sync_fn();
constexpr int SmemFragSize = cute::max(size_t{1}, sizeof(uint32_t) / sizeof(ElementCompute));
using FragmentSmem = Array<ElementCompute, SmemFragSize>;
using VectorSmem = uint_bit_t<sizeof_bits_v<FragmentSmem>>;
using ReduceSmem = GmemReduceFn<FragmentSmem>;
ReduceSmem reduce_smem{};
Tensor sBuf_frg = recast<FragmentSmem>(filter_zeros(sBuf));
Tensor sBuf_vec = recast<VectorSmem>(filter_zeros(sBuf));
constexpr int FragsPerRow = decltype(size<1>(sBuf_frg))::value;
// Do the threadblock smem reduction
CUTLASS_PRAGMA_UNROLL
for (int reduction_rows = size<0>(warp_layout_MN) / 2; reduction_rows > 1; reduction_rows /= 2) {
int FragsPerReduction = reduction_rows * FragsPerRow;
CUTLASS_PRAGMA_NO_UNROLL
for (int frg_idx = thread_idx; frg_idx < FragsPerReduction; frg_idx += size(tiled_copy)) {
FragmentSmem frg_smem = reduce_smem(sBuf_frg(frg_idx), sBuf_frg(frg_idx + FragsPerReduction));
sBuf_vec(frg_idx) = reinterpret_cast<VectorSmem&>(frg_smem);
}
sync_fn();
}
// Do final smem reduction and dump to gmem workspace
using VectorGmem = cute::conditional_t<FinalReduction, VectorSmem volatile, VectorSmem>;
Tensor gBuf_vec = recast<VectorGmem>(filter(gBuf_ml(_,_,m,l)));
CUTLASS_PRAGMA_NO_UNROLL
for (int frg_idx = thread_idx; frg_idx < FragsPerRow; frg_idx += size(tiled_copy)) {
FragmentSmem frg_smem = reduce_smem(sBuf_frg(frg_idx), sBuf_frg(frg_idx + FragsPerRow));
gBuf_vec(frg_idx) = reinterpret_cast<VectorSmem&>(frg_smem);
}
sync_fn();
}
//
// 3. Increment atomic counters to signal final gmem reduction
//
if constexpr (not IsAtomic && FinalReduction) {
// Ensure gmem writes are visible to other threads before incrementing counter
__threadfence();
sync_fn();
// Thread 0 of the CTA increments the atomic tile counter and stores the previous count to smem
int* prev_tile_count = reinterpret_cast<int*>(raw_pointer_cast(smem_buffer.data()));
if (thread_idx == 0) {
*prev_tile_count = atomicAdd(&params.tile_counters[n], 1);
}
sync_fn();
// Broadcast tile count to other threads in CTA and determine final reduction status
do_final_reduction = *prev_tile_count == size<2>(gBuf_ml) * size<3>(gBuf_ml) - 1;
sync_fn();
}
}
CUTLASS_DEVICE void
end() {
//
// 4. Do final gmem reduction if necessary
//
if constexpr (not IsAtomic && FinalReduction) {
if (not do_final_reduction) {
return;
}
auto& [ref_src, tCrRow, tCcRow, gRow_l, cRow, gBuf_ml, sBuf_layout,
lane_layout_MN, lane_mn, warp_layout_MN, warp_mn,
tile_coord_mnkl, residue_cRow, residue_tCcRow, epi_tile, tiled_copy, thread_idx] = args_tuple;
using ReduceOutput = GmemReduceFn<ElementCompute>;
using ConvertOutput = NumericConverter<ElementOutput, ElementCompute, RoundStyle>;
ReduceOutput reduce_output{};
ConvertOutput convert_output{};
// Reduction over batches
if (size<2>(stride(gRow_l)) == 0) {
CUTLASS_PRAGMA_NO_UNROLL
for (int n = thread_idx; n < size<1>(gBuf_ml); n += size(tiled_copy)) {
Tensor tRgBuf_ml = gBuf_ml(_0{},n,_,_);
ElementCompute output = tRgBuf_ml(_0{});
CUTLASS_PRAGMA_NO_UNROLL
for (int ml = 1; ml < size(tRgBuf_ml); ++ml) {
output = reduce_output(output, tRgBuf_ml(ml));
}
if (elem_less(cRow(_0{},n), residue_cRow)) {
gRow_l(_0{},n,_0{}) = convert_output(output);
}
}
}
// No reduction over batches
else {
CUTLASS_PRAGMA_NO_UNROLL
for (int n = thread_idx; n < size<1>(gBuf_ml); n += size(tiled_copy)) {
bool do_store = elem_less(cRow(_0{},n), residue_cRow);
CUTLASS_PRAGMA_NO_UNROLL
for (int l = 0; l < size<3>(gBuf_ml); ++l) {
Tensor tRgBuf_m = gBuf_ml(_0{},n,_,l);
ElementCompute output = tRgBuf_m(_0{});
CUTLASS_PRAGMA_NO_UNROLL
for (int m = 1; m < size(tRgBuf_m); ++m) {
output = reduce_output(output, tRgBuf_m(m));
}
if (do_store) {
gRow_l(_0{},n,l) = convert_output(output);
}
}
}
}
}
}
};
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
Layout ref_layout_MN = [&] () {
if constexpr (ReferenceSrc) { return get<0>(args.tiled_copy.get_layoutS_MN()); }
else { return get<0>(args.tiled_copy.get_layoutD_MN()); }
}(); // tile_mn -> tv_idx
// Get the MN layout + coord of lanes to determine shuffle reduction iterations
using _W = Int<decltype(args.tiled_copy)::TiledNumThr::value / NumThreadsPerWarp>;
Layout tv2lane = Layout<Shape<Int<NumThreadsPerWarp>,_W,_1>,Stride<_1,_0,_0>>{}; // tv_idx -> lane_idx
Layout ref2lane = composition(tv2lane, ref_layout_MN); // tile_mn -> lane_idx
Layout lane_layout_MN = make_layout(filter(get<0>(ref2lane)), filter(get<1>(ref2lane))); // lane_mn -> lane_idx
Layout inv_lane_layout_MN = right_inverse(lane_layout_MN); // lane_idx -> lane_mn
int lane_idx = canonical_lane_idx();
auto lane_mn = idx2crd(inv_lane_layout_MN(lane_idx), shape(lane_layout_MN));
// Get the MN layout + coord of warps to determine smem reduction iterations
Layout tv2warp = Layout<Shape<Int<NumThreadsPerWarp>,_W,_1>,Stride<_0,_1,_0>>{}; // tv_idx -> warp_idx
Layout ref2warp = composition(tv2warp, ref_layout_MN); // tile_mn -> warp_idx
Layout warp_layout_MN = make_layout(filter(get<0>(ref2warp)), filter(get<1>(ref2warp))); // warp_mn -> warp_idx
Layout inv_warp_layout_MN = right_inverse(warp_layout_MN); // warp_idx -> warp_mn
int warp_idx = args.thread_idx / NumThreadsPerWarp;
auto warp_mn = idx2crd(inv_warp_layout_MN(warp_idx), shape(warp_layout_MN));
// Partition output gmem and register tensors
auto [tile_M, tile_N, tile_K] = args.tile_shape_mnk;
auto [M, N, K, L] = args.problem_shape_mnkl;
auto [m, n, k, l] = args.tile_coord_mnkl;
Tensor mRow = make_tensor(make_gmem_ptr<ElementOutput>(params.ptr_row), make_shape(M,N,L), params.dRow); // (M,N,L)
Tensor gRow_l = local_tile(mRow, take<0,2>(args.tile_shape_mnk), make_coord(m,n,_)); // (CTA_M,CTA_N,L)
Tensor tCgRow = sm90_partition_for_epilogue<ReferenceSrc>( // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
gRow_l(_,_,l), args.epi_tile, args.tiled_copy, args.thread_idx);
Tensor tCrRow = make_tensor_like<ElementCompute>(tCgRow); // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
fill(tCrRow, params.reduction_identity);
// Partition gmem+smem reduction buffer tensors
Layout gBuf_layout = make_layout(take<0,2>(args.tile_shape_mnk), make_stride(_0{}, _1{}));
auto block_shape = ceil_div(make_shape(M,N,L), shape(gBuf_layout)); // (M_CNT, N_CNT, L_CNT)
// Let M_CNT (the number of partial reduction results) become the outer mode
Layout block_layout = make_layout(block_shape, make_stride(get<1>(block_shape), _1{}, get<0>(block_shape) * get<1>(block_shape)));
Layout mBuf_layout = blocked_product(gBuf_layout, block_layout);
Tensor mBuf = make_tensor(make_gmem_ptr(params.reduction_buffer), mBuf_layout); // (ceil_M,ceil_N,L)
Tensor gBuf_ml = local_tile(mBuf, take<0,2>(args.tile_shape_mnk), make_coord(_,n,_)); // (CTA_M,CTA_N,REST_M,L)
Layout sBuf_layout = blocked_product(gBuf_layout, // (CTA_M,CTA_N,WARPS_M)
make_layout(make_shape(_1{},_1{},size<0>(warp_layout_MN))));
auto args_tuple = make_tuple(
bool_constant<ReferenceSrc>{}, cute::move(tCrRow), args.tCcD, gRow_l, args.cD, gBuf_ml, sBuf_layout,
lane_layout_MN, lane_mn, warp_layout_MN, warp_mn,
args.tile_coord_mnkl, args.residue_cD, args.residue_tCcD, args.epi_tile, args.tiled_copy, args.thread_idx);
return ConsumerStoreCallbacks<decltype(args_tuple)>(cute::move(args_tuple), params);
}
};
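// How the row-reduction pieces fit together (descriptive summary): visit() accumulates
// per-thread partials in registers, reduce() folds them across the warp (shuffle), across
// warps (smem) and across CTAs (atomics or the gmem workspace), and end() lets the
// last-arriving CTA for a given n tile, tracked via tile_counters[n], perform the final
// reduction over M tiles and, if dRow has a zero batch stride, over batches into ptr_row.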
/////////////////////////////////////////////////////////////////////////////////////////////////
// Col vector reduction
template <
template <class> class RegReduceFn,
template <class> class ShuffleReduceFn,
template <class> class GmemReduceFn,
int Stages,
class CtaTileShapeMNK,
class ElementOutput,
class ElementCompute,
FloatRoundStyle RoundStyle,
class StrideMNL = Stride<_1,_0,_0>,
int Alignment = 128 / sizeof_bits_v<ElementOutput>,
bool EnableNullptr = true, // Noop on nullptr params
// If this is false, ptr_col is assumed to point to a compact m-major (round_nearest(M,CTA_M), ceil_div(N,CTA_N), L)
// tensor of ElementCompute. It is the user's responsibility to reduce this to a (M, L) tensor of ElementOutput
bool FinalReduction = true,
// False means skip OOB predication if OOB inputs are known to be the reduction identity
bool VisitCheckOOB = true
>
struct Sm90ColReduction {
private:
static_assert(Stages == 0, "Smem usage not supported yet");
static_assert(Alignment * sizeof_bits_v<ElementOutput> % 128 == 0, "sub-16B alignment not supported yet");
static_assert(
(cute::is_same_v<StrideMNL, Stride<_1,_0, _0>>) || // col vector reduction, e.g. per-row sum over all batches
(cute::is_same_v<StrideMNL, Stride<_1,_0,int>>)); // batched col vector reduction, e.g. per-row sum per batch
static constexpr bool IsAtomic = is_atomic<GmemReduceFn<ElementCompute>>::value;
static_assert(not (IsAtomic && not FinalReduction), "atomic reduction must be final");
public:
struct SharedStorage { };
struct Arguments {
void* ptr_col = nullptr; // ElementOutput* if FinalReduction, else ElementCompute*
ElementCompute reduction_identity = 0;
StrideMNL dCol = {};
};
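// Illustrative host-side setup (the buffer name below is hypothetical, not part of this header):
//   ElementOutput* d_col = ...;  // at least M elements, or M x L when dCol carries a batch stride
//   Arguments args{d_col, ElementCompute(0) /* identity, e.g. 0 for a sum */, StrideMNL{}};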
struct Params {
void* ptr_col = nullptr;
ElementCompute reduction_identity = 0;
StrideMNL dCol = {};
ElementCompute* reduction_buffer = nullptr;
int* tile_counters = nullptr;
};
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
ElementCompute* reduction_buffer;
int* tile_counters = nullptr;
if constexpr (IsAtomic) {
reduction_buffer = nullptr;
}
else if constexpr (FinalReduction) {
auto [M, N, K, L] = problem_shape;
auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{};
size_t tile_counters_offset = product(ceil_div(make_shape(M,N,L), make_shape(tile_M, tile_N))) * tile_M * sizeof(ElementCompute);
tile_counters_offset = round_nearest(tile_counters_offset, MinWorkspaceAlignment);
reduction_buffer = reinterpret_cast<ElementCompute*>(workspace);
tile_counters = reinterpret_cast<int*>(reinterpret_cast<uint8_t*>(workspace) + tile_counters_offset);
}
else {
reduction_buffer = reinterpret_cast<ElementCompute*>(args.ptr_col);
}
return {
args.ptr_col,
args.reduction_identity,
args.dCol,
reduction_buffer,
tile_counters
};
}
template <class ProblemShape>
static bool
can_implement(ProblemShape const& problem_shape, Arguments const& args) {
return true;
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
if constexpr (IsAtomic || not FinalReduction) {
return 0;
}
size_t workspace_size = 0;
auto [M, N, K, L] = problem_shape;
auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{};
// Increment by size of reduction buffer
workspace_size += product(ceil_div(make_shape(M,N,L), make_shape(tile_M, tile_N))) * tile_M * sizeof(ElementCompute);
// Align and increment by size of tile counters
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
workspace_size += cute::ceil_div(M, tile_M) * sizeof(int);
return workspace_size;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
if constexpr (IsAtomic) {
auto [M, N, K, L] = problem_shape;
Layout mCol_layout = make_layout(make_shape(size<>(M),size<>(N),size<>(L)), args.dCol);
if (args.ptr_col != nullptr) {
return fill_workspace(args.ptr_col, ElementOutput(args.reduction_identity), cosize(mCol_layout), stream, cuda_adapter);
}
return Status::kSuccess;
}
else if constexpr (FinalReduction) {
auto [M, N, K, L] = problem_shape;
auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{};
size_t tile_counters_offset = product(ceil_div(make_shape(M,N,L), make_shape(tile_M, tile_N))) * tile_M * sizeof(ElementCompute);
tile_counters_offset = round_nearest(tile_counters_offset, MinWorkspaceAlignment);
int* tile_counters = reinterpret_cast<int*>(reinterpret_cast<uint8_t*>(workspace) + tile_counters_offset);
size_t tile_counters_size = cute::ceil_div(M, tile_M) * sizeof(int);
return zero_workspace(tile_counters, tile_counters_size, stream, cuda_adapter);
}
else {
return Status::kSuccess;
}
}
CUTLASS_DEVICE bool
is_producer_load_needed() const {
return false;
}
CUTLASS_DEVICE bool
is_C_load_needed() const {
return false;
}
CUTLASS_HOST_DEVICE
Sm90ColReduction() { }
CUTLASS_HOST_DEVICE
Sm90ColReduction(Params const& params, SharedStorage const& shared_storage)
: params(params) { }
Params params;
template <class... Args>
CUTLASS_DEVICE auto
get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) {
return EmptyProducerLoadCallbacks{};
}
template<class ArgsTuple>
struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks {
CUTLASS_DEVICE
ConsumerStoreCallbacks(ArgsTuple&& args_tuple, Params const& params)
: args_tuple(cute::forward<ArgsTuple>(args_tuple)),
params(params) {}
ArgsTuple args_tuple;
Params const& params;
bool do_final_reduction = false;
template <typename ElementAccumulator, typename ElementInput, int FragmentSize>
CUTLASS_DEVICE auto
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n,
Array<ElementInput, FragmentSize> const& frg_input) {
if constexpr (EnableNullptr) {
if (params.ptr_col == nullptr) {
return frg_input;
}
}
auto& [ref_src, tCrCol, tCcCol, gCol_l, cCol, gBuf_nl, sBuf_layout,
lane_layout_MN, lane_mn, warp_layout_MN, warp_mn,
tile_coord_mnkl, residue_cCol, residue_tCcCol, epi_tile, tiled_copy, thread_idx] = args_tuple;
Tensor tCrCol_mn = tCrCol(_,_,_,epi_m,epi_n);
Tensor tCcCol_mn = tCcCol(_,_,_,epi_m,epi_n);
using ConvertInput = NumericArrayConverter<ElementCompute, ElementInput, FragmentSize, RoundStyle>;
using ReduceInput = RegReduceFn<ElementCompute>;
ConvertInput convert_input{};
ReduceInput reduce_input{};
Array frg_I = convert_input(frg_input);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < FragmentSize; ++i) {
if (!VisitCheckOOB || elem_less(tCcCol_mn(epi_v * FragmentSize + i), residue_tCcCol)) {
ElementCompute& tCrCol_vmn = tCrCol_mn(epi_v * FragmentSize + i);
tCrCol_vmn = reduce_input(tCrCol_vmn, frg_I[i]);
}
}
return frg_input;
}
template <class STensor, class SyncFn, class VTensor>
CUTLASS_DEVICE void
reduce(STensor&& smem_buffer, SyncFn const& sync_fn, int epi_m, int epi_n, bool is_last_iteration, VTensor visit_results) {
if (not is_last_iteration) {
return;
}
auto& [ref_src, tCrCol, tCcCol, gCol_l, cCol, gBuf_nl, sBuf_layout,
lane_layout_MN, lane_mn, warp_layout_MN, warp_mn,
tile_coord_mnkl, residue_cCol, residue_tCcCol, epi_tile, tiled_copy, thread_idx] = args_tuple;
auto [m, n, k, l] = tile_coord_mnkl;
constexpr bool ReferenceSrc = decltype(ref_src)::value;
// Runtime nullptr is noop
if constexpr (EnableNullptr) {
if (params.ptr_col == nullptr) {
return;
}
}
// fully OOB CTA in partially OOB cluster
if (not elem_less(cCol(_0{},_0{}), residue_cCol)) {
return;
}
//
// 1. Warp shuffle reduction
//
using FragmentShuffle = Array<ElementCompute, sizeof(uint64_t) / sizeof(ElementCompute)>;
using ReduceShuffle = ShuffleReduceFn<FragmentShuffle>;
ReduceShuffle reduce_shuffle{};
Tensor tCrCol_frg = recast<FragmentShuffle>(filter(tCrCol));
CUTLASS_PRAGMA_UNROLL
for (int reduction_cols = size<1>(lane_layout_MN) / 2; reduction_cols > 0; reduction_cols /= 2) {
CUTLASS_PRAGMA_UNROLL
for (int frg_idx = 0; frg_idx < size(tCrCol_frg); ++frg_idx) {
uint64_t frg_shfl = reinterpret_cast<uint64_t&>(tCrCol_frg(frg_idx));
frg_shfl = __shfl_down_sync(0xFFFFFFFF, frg_shfl, lane_layout_MN(_0{},reduction_cols));
tCrCol_frg(frg_idx) = reduce_shuffle(tCrCol_frg(frg_idx), reinterpret_cast<FragmentShuffle&>(frg_shfl));
}
}
bool is_reduced_lane = get<1>(lane_mn) == 0;
//
// 2. Atomic reduction
//
if constexpr (IsAtomic) {
// Filter so we don't issue redundant copies over stride-0 modes
Tensor tCrCol_flt = filter_zeros(tCrCol);
Tensor tCcCol_flt = make_tensor(tCcCol.data(), make_layout(tCrCol_flt.shape(), tCcCol.stride()));
Tensor tCgCol = sm90_partition_for_epilogue<ReferenceSrc>(gCol_l(_,_,l), epi_tile, tiled_copy, thread_idx);
Tensor tCgCol_flt = filter_zeros(tCgCol);
// NOTE: atomic reduction is performed in the output type
using ConvertOutput = NumericConverter<ElementOutput, ElementCompute, RoundStyle>;
using ReduceOutput = GmemReduceFn<ElementOutput>;
ConvertOutput convert_output{};
ReduceOutput reduce_output{};
if (is_reduced_lane) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size(tCrCol_flt); ++i) {
if (elem_less(tCcCol_flt(i), residue_tCcCol)) {
reduce_output(&tCgCol_flt(i), convert_output(tCrCol_flt(i)));
}
}
}
sync_fn();
}
//
// 2. One warp in N, skip threadblock smem reduction
//
else if constexpr (decltype(size<1>(warp_layout_MN))::value <= 1) {
// Dump warp reduction to gmem workspace
using ElementGmem = cute::conditional_t<FinalReduction, ElementCompute volatile, ElementCompute>;
Tensor tCgBuf = sm90_partition_for_epilogue<ReferenceSrc>(gBuf_nl(_,_,n,l), epi_tile, tiled_copy, thread_idx);
if (is_reduced_lane) {
// Filter so we don't issue redundant copies over stride-0 modes
// (only works if 0-strides are in same location, which is by construction)
copy_aligned(filter(tCrCol), recast<ElementGmem>(filter(tCgBuf)));
}
sync_fn();
}
//
// 2. Multiple warps in N, do threadblock smem reduction
//
else {
Tensor sBuf = make_tensor(make_smem_ptr<ElementCompute>(raw_pointer_cast(smem_buffer.data())), sBuf_layout);
static_assert(decltype(cosize(sBuf.layout()))::value * sizeof(ElementCompute) <=
decltype(cosize(smem_buffer.layout()))::value * sizeof(typename remove_cvref_t<STensor>::value_type),
"smem reduction buffer not large enough, use a larger epilogue tile");
sync_fn();
// Dump warp reduction to smem workspace
Tensor tCsBuf = sm90_partition_for_epilogue<ReferenceSrc>(sBuf(_,_,get<1>(warp_mn)), epi_tile, tiled_copy, thread_idx);
if (is_reduced_lane) {
// Filter so we don't issue redundant copies over stride-0 modes
// (only works if 0-strides are in same location, which is by construction)
copy_aligned(filter(tCrCol), filter(tCsBuf));
}
sync_fn();
constexpr int SmemFragSize = cute::max(size_t{1}, sizeof(uint32_t) / sizeof(ElementCompute));
using FragmentSmem = Array<ElementCompute, SmemFragSize>;
using VectorSmem = uint_bit_t<sizeof_bits_v<FragmentSmem>>;
using ReduceSmem = GmemReduceFn<FragmentSmem>;
ReduceSmem reduce_smem{};
Tensor sBuf_frg = recast<FragmentSmem>(filter_zeros(sBuf));
Tensor sBuf_vec = recast<VectorSmem>(filter_zeros(sBuf));
constexpr int FragsPerCol = decltype(size<0>(sBuf_frg))::value;
// Do the threadblock smem reduction
CUTLASS_PRAGMA_UNROLL
for (int reduction_cols = size<1>(warp_layout_MN) / 2; reduction_cols > 1; reduction_cols /= 2) {
int FragsPerReduction = reduction_cols * FragsPerCol;
CUTLASS_PRAGMA_NO_UNROLL
for (int frg_idx = thread_idx; frg_idx < FragsPerReduction; frg_idx += size(tiled_copy)) {
FragmentSmem frg_smem = reduce_smem(sBuf_frg(frg_idx), sBuf_frg(frg_idx + FragsPerReduction));
sBuf_vec(frg_idx) = reinterpret_cast<VectorSmem&>(frg_smem);
}
sync_fn();
}
// Do final smem reduction and dump to gmem workspace
using VectorGmem = cute::conditional_t<FinalReduction, VectorSmem volatile, VectorSmem>;
Tensor gBuf_vec = recast<VectorGmem>(filter(gBuf_nl(_,_,n,l)));
CUTLASS_PRAGMA_NO_UNROLL
for (int frg_idx = thread_idx; frg_idx < FragsPerCol; frg_idx += size(tiled_copy)) {
FragmentSmem frg_smem = reduce_smem(sBuf_frg(frg_idx), sBuf_frg(frg_idx + FragsPerCol));
gBuf_vec(frg_idx) = reinterpret_cast<VectorSmem&>(frg_smem);
}
sync_fn();
}
//
// 3. Increment atomic counters to signal final gmem reduction
//
if constexpr (not IsAtomic && FinalReduction) {
// Ensure gmem writes are visible to other threads before incrementing counter
__threadfence();
sync_fn();
// Thread 0 of the CTA increments the atomic tile counter and stores the previous count to smem
int* prev_tile_count = reinterpret_cast<int*>(raw_pointer_cast(smem_buffer.data()));
if (thread_idx == 0) {
*prev_tile_count = atomicAdd(&params.tile_counters[m], 1);
}
sync_fn();
// Broadcast tile count to other threads in CTA and determine final reduction status
do_final_reduction = *prev_tile_count == size<2>(gBuf_nl) * size<3>(gBuf_nl) - 1;
sync_fn();
}
}
CUTLASS_DEVICE void
end() {
//
// 4. Do final gmem reduction if necessary
//
if constexpr (not IsAtomic && FinalReduction) {
if (not do_final_reduction) {
return;
}
auto& [ref_src, tCrCol, tCcCol, gCol_l, cCol, gBuf_nl, sBuf_layout,
lane_layout_MN, lane_mn, warp_layout_MN, warp_mn,
tile_coord_mnkl, residue_cCol, residue_tCcCol, epi_tile, tiled_copy, thread_idx] = args_tuple;
using ReduceOutput = GmemReduceFn<ElementCompute>;
using ConvertOutput = NumericConverter<ElementOutput, ElementCompute, RoundStyle>;
ReduceOutput reduce_output{};
ConvertOutput convert_output{};
// Reduction over batches
if (size<2>(stride(gCol_l)) == 0) {
CUTLASS_PRAGMA_NO_UNROLL
for (int m = thread_idx; m < size<0>(gBuf_nl); m += size(tiled_copy)) {
Tensor tRgBuf_nl = gBuf_nl(m,_0{},_,_);
ElementCompute output = tRgBuf_nl(_0{});
CUTLASS_PRAGMA_NO_UNROLL
for (int nl = 1; nl < size(tRgBuf_nl); ++nl) {
output = reduce_output(output, tRgBuf_nl(nl));
}
if (elem_less(cCol(m,_0{}), residue_cCol)) {
gCol_l(m,_0{},_0{}) = convert_output(output);
}
}
}
// No reduction over batches
else {
CUTLASS_PRAGMA_NO_UNROLL
for (int m = thread_idx; m < size<0>(gBuf_nl); m += size(tiled_copy)) {
bool do_store = elem_less(cCol(m,_0{}), residue_cCol);
CUTLASS_PRAGMA_NO_UNROLL
for (int l = 0; l < size<3>(gBuf_nl); ++l) {
Tensor tRgBuf_n = gBuf_nl(m,_0{},_,l);
ElementCompute output = tRgBuf_n(_0{});
CUTLASS_PRAGMA_NO_UNROLL
for (int n = 1; n < size(tRgBuf_n); ++n) {
output = reduce_output(output, tRgBuf_n(n));
}
if (do_store) {
gCol_l(m,_0{},l) = convert_output(output);
}
}
}
}
}
}
};
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
Layout ref_layout_MN = [&] () {
if constexpr (ReferenceSrc) { return get<0>(args.tiled_copy.get_layoutS_MN()); }
else { return get<0>(args.tiled_copy.get_layoutD_MN()); }
}(); // tile_mn -> tv_idx
// Get the MN layout + coord of lanes to determine shuffle reduction iterations
using _W = Int<decltype(args.tiled_copy)::TiledNumThr::value / NumThreadsPerWarp>;
Layout tv2lane = Layout<Shape<Int<NumThreadsPerWarp>,_W,_1>,Stride<_1,_0,_0>>{}; // tv_idx -> lane_idx
Layout ref2lane = composition(tv2lane, ref_layout_MN); // tile_mn -> lane_idx
Layout lane_layout_MN = make_layout(filter(get<0>(ref2lane)), filter(get<1>(ref2lane))); // lane_mn -> lane_idx
Layout inv_lane_layout_MN = right_inverse(lane_layout_MN); // lane_idx -> lane_mn
int lane_idx = canonical_lane_idx();
auto lane_mn = idx2crd(inv_lane_layout_MN(lane_idx), shape(lane_layout_MN));
// Get the MN layout + coord of warps to determine smem reduction iterations
Layout tv2warp = Layout<Shape<Int<NumThreadsPerWarp>,_W,_1>,Stride<_0,_1,_0>>{}; // tv_idx -> warp_idx
Layout ref2warp = composition(tv2warp, ref_layout_MN); // tile_mn -> warp_idx
Layout warp_layout_MN = make_layout(filter(get<0>(ref2warp)), filter(get<1>(ref2warp))); // warp_mn -> warp_idx
Layout inv_warp_layout_MN = right_inverse(warp_layout_MN); // warp_idx -> warp_mn
int warp_idx = args.thread_idx / NumThreadsPerWarp;
auto warp_mn = idx2crd(inv_warp_layout_MN(warp_idx), shape(warp_layout_MN));
// Partition output gmem and register tensors
auto [tile_M, tile_N, tile_K] = args.tile_shape_mnk;
auto [M, N, K, L] = args.problem_shape_mnkl;
auto [m, n, k, l] = args.tile_coord_mnkl;
Tensor mCol = make_tensor(make_gmem_ptr<ElementOutput>(params.ptr_col), make_shape(M,N,L), params.dCol); // (M,N,L)
Tensor gCol_l = local_tile(mCol, take<0,2>(args.tile_shape_mnk), make_coord(m,n,_)); // (CTA_M,CTA_N,L)
Tensor tCgCol = sm90_partition_for_epilogue<ReferenceSrc>( // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
gCol_l(_,_,l), args.epi_tile, args.tiled_copy, args.thread_idx);
Tensor tCrCol = make_tensor_like<ElementCompute>(tCgCol); // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
fill(tCrCol, params.reduction_identity);
// Partition gmem+smem reduction buffer tensors
Layout gBuf_layout = make_layout(take<0,2>(args.tile_shape_mnk), make_stride(_1{}, _0{}));
Layout mBuf_layout = blocked_product(gBuf_layout, make_layout(ceil_div(make_shape(M,N,L), shape(gBuf_layout))));
Tensor mBuf = make_tensor(make_gmem_ptr(params.reduction_buffer), mBuf_layout); // (ceil_M,ceil_N,L)
Tensor gBuf_nl = local_tile(mBuf, take<0,2>(args.tile_shape_mnk), make_coord(m,_,_)); // (CTA_M,CTA_N,REST_N,L)
Layout sBuf_layout = blocked_product(gBuf_layout,make_layout(make_shape(_1{},_1{},size<1>(warp_layout_MN)))); // (CTA_M,CTA_N,WARPS_N)
auto args_tuple = make_tuple(
bool_constant<ReferenceSrc>{}, cute::move(tCrCol), args.tCcD, gCol_l, args.cD, gBuf_nl, sBuf_layout,
lane_layout_MN, lane_mn, warp_layout_MN, warp_mn,
args.tile_coord_mnkl, args.residue_cD, args.residue_tCcD, args.epi_tile, args.tiled_copy, args.thread_idx);
return ConsumerStoreCallbacks<decltype(args_tuple)>(cute::move(args_tuple), params);
}
};
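// Sm90ColReduction mirrors Sm90RowReduction with the roles of M and N swapped: partial
// results are tile_M-sized column vectors, the gmem workspace is tiled over (REST_N, L),
// and tile_counters is indexed by the CTA's m-tile coordinate.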
/////////////////////////////////////////////////////////////////////////////////////////////////
// Batch matrix reduction
template <
int Stages,
class EpilogueTile,
class Element,
class StrideMNL,
class CopyOpR2S,
class SmemLayoutAtom,
int Alignment = 128 / sizeof_bits_v<Element>,
bool EnableNullptr = true // Noop on nullptr params
>
struct Sm90MatrixReduction;
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::epilogue::fusion
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/fusion/sm90_visitor_store_tma_warpspecialized.hpp/0 | {
"file_path": "include/cutlass/epilogue/fusion/sm90_visitor_store_tma_warpspecialized.hpp",
"repo_id": "include",
"token_count": 25981
} | 19 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
struct LinearCombinationParams {
uint64_t alpha_data[2];
uint64_t beta_data[2];
CUTLASS_HOST_DEVICE
LinearCombinationParams()
: alpha_data {0lu, 0lu}, beta_data {0lu, 0lu}
{ }
template <typename ElementCompute>
CUTLASS_HOST_DEVICE
LinearCombinationParams(ElementCompute alpha, ElementCompute beta)
: alpha_data {0lu, 0lu}, beta_data {0lu, 0lu}
{
#if defined(__CUDA_ARCH__)
reinterpret_cast<ElementCompute&>(alpha_data) = alpha;
reinterpret_cast<ElementCompute&>(beta_data) = beta;
#else
memcpy( alpha_data, &alpha, sizeof(ElementCompute) );
memcpy( beta_data, &beta, sizeof(ElementCompute) );
#endif
}
};
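// Usage sketch (illustrative only): pack epilogue scalars into the type-erased storage above.
//   float alpha = 1.5f, beta = 0.0f;
//   LinearCombinationParams packed(alpha, beta);
//   float alpha_back;
//   memcpy(&alpha_back, packed.alpha_data, sizeof(float));  // recover alpha on the host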
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/thread/linear_combination_params.h/0 | {
"file_path": "include/cutlass/epilogue/thread/linear_combination_params.h",
"repo_id": "include",
"token_count": 799
} | 20 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/thread/linear_combination_relu0.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"
#include "cutlass/epilogue/thread/linear_combination_sigmoid.h"
#include "cutlass/epilogue/thread/linear_combination_hardswish.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h"
#include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_conv.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator_mixed.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/threadblock/interleaved_epilogue.h"
#include "cutlass/layout/permute.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename ElementOutput,
typename ElementAccumulator,
int ElementsPerAccess,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp {
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
ElementAccumulator,
layout::RowMajor
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
ElementAccumulator
>;
static int const kFragmentsPerIteration = 1;
};
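// The partial specializations below override these defaults for specific combinations of
// output type, accumulator type and elements-per-access: several select the *Mixed warp and
// shared-load iterators to avoid shared memory bank conflicts for narrow output types, and
// some raise kFragmentsPerIteration to 2.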
/// Partial specialization for float <= float x 4
template <
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<float, float, 4, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> {
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
float,
layout::RowMajor
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
float
>;
static int const kFragmentsPerIteration = 2;
};
/// Partial specialization for int32_t <= int32_t
template <
int ElementsPerAccess,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<int32_t, int32_t, ElementsPerAccess, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> {
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
int32_t,
layout::RowMajor
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
int32_t
>;
static int const kFragmentsPerIteration = 1;
};
/// Partial specialization for float <= int32_t
template <
int ElementsPerAccess,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<float, int32_t, ElementsPerAccess, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> {
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
int32_t,
layout::RowMajor
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
int32_t
>;
static int const kFragmentsPerIteration = 1;
};
/// Partial specialization for half <= float x 8 epilogues avoids shared memory bank conflicts.
template <
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<
half_t,
float,
8,
ThreadblockShape,
WarpShape,
InstructionShape,
ThreadMap> {
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOpMixed<
WarpShape,
InstructionShape,
float,
32,
16,
8,
8
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIteratorMixed<
ThreadMap,
float,
32,
16,
8,
8
>;
static int const kFragmentsPerIteration = 2;
};
/// Partial specialization for half <= int32_t x 8 epilogues avoids shared memory bank conflicts.
template <
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<
half_t,
int32_t,
8,
ThreadblockShape,
WarpShape,
InstructionShape,
ThreadMap> {
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOpMixed<
WarpShape,
InstructionShape,
int32_t,
32,
16,
8,
8
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIteratorMixed<
ThreadMap,
int32_t,
32,
16,
8,
8
>;
static int const kFragmentsPerIteration = 2;
};
/// Partial specialization for int8/int4b_t <= int32 x 16/8 epilogues avoids shared memory bank conflicts.
/// Threadblock::kN = 256 still has bank conflicts.
template <
typename ElementOutput,
int ElementsPerAccess,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<
ElementOutput,
int32_t,
ElementsPerAccess,
ThreadblockShape,
WarpShape,
InstructionShape,
ThreadMap> {
static_assert(platform::is_same<ElementOutput, cutlass::int4b_t>::value ||
platform::is_same<ElementOutput, cutlass::uint4b_t>::value ||
platform::is_same<ElementOutput, int8_t>::value ||
platform::is_same<ElementOutput, uint8_t>::value,
"ElementOutput needs to be 4 or 8 bit (unsigned) int.");
static_assert((ElementsPerAccess == 16 || ElementsPerAccess == 8 || ElementsPerAccess == 4),
"ElementsPerAccess needs to be 16 or 8.");
using WarpTileIteratorMixed = cutlass::epilogue::warp::TileIteratorTensorOpMixed<
WarpShape,
InstructionShape,
int32_t,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using WarpTileIteratorNotMixed = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
int32_t,
layout::RowMajor
>;
using WarpTileIterator = typename platform::conditional<
(ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4),
WarpTileIteratorNotMixed,
WarpTileIteratorMixed>::type;
using SharedLoadIteratorMixed = cutlass::epilogue::threadblock::SharedLoadIteratorMixed<
ThreadMap,
int32_t,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using SharedLoadIteratorNotMixed = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
int32_t
>;
using SharedLoadIterator = typename platform::conditional<
(ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4),
SharedLoadIteratorNotMixed,
SharedLoadIteratorMixed>::type;
static int const kFragmentsPerIteration = 1;
};
/// Partial specialization for float_e4m3_t <= float x 16/8 epilogues avoids shared memory bank conflicts.
/// Threadblock::kN = 256 still has bank conflicts.
template <
int ElementsPerAccess,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<
cutlass::float_e4m3_t,
float,
ElementsPerAccess,
ThreadblockShape,
WarpShape,
InstructionShape,
ThreadMap> {
using ElementOutput = cutlass::float_e4m3_t;
static_assert((ElementsPerAccess == 16 || ElementsPerAccess == 8 || ElementsPerAccess == 4),
"ElementsPerAccess needs to be 16 or 8.");
using WarpTileIteratorMixed = cutlass::epilogue::warp::TileIteratorTensorOpMixed<
WarpShape,
InstructionShape,
float,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using WarpTileIteratorNotMixed = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
float,
layout::RowMajor
>;
using WarpTileIterator = typename platform::conditional<
(ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4),
WarpTileIteratorNotMixed,
WarpTileIteratorMixed>::type;
using SharedLoadIteratorMixed = cutlass::epilogue::threadblock::SharedLoadIteratorMixed<
ThreadMap,
float,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using SharedLoadIteratorNotMixed = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
float
>;
using SharedLoadIterator = typename platform::conditional<
(ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4),
SharedLoadIteratorNotMixed,
SharedLoadIteratorMixed>::type;
static int const kFragmentsPerIteration = 1;
};
/// Partial specialization for float_e5m2_t <= float x 16/8 epilogues avoids shared memory bank conflicts.
/// Threadblock::kN = 256 still has bank conflicts.
template <
int ElementsPerAccess,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<
cutlass::float_e5m2_t,
float,
ElementsPerAccess,
ThreadblockShape,
WarpShape,
InstructionShape,
ThreadMap> {
using ElementOutput = cutlass::float_e5m2_t;
static_assert((ElementsPerAccess == 16 || ElementsPerAccess == 8 || ElementsPerAccess == 4),
"ElementsPerAccess needs to be 16 or 8.");
using WarpTileIteratorMixed = cutlass::epilogue::warp::TileIteratorTensorOpMixed<
WarpShape,
InstructionShape,
float,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using WarpTileIteratorNotMixed = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
float,
layout::RowMajor
>;
using WarpTileIterator = typename platform::conditional<
(ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4),
WarpTileIteratorNotMixed,
WarpTileIteratorMixed>::type;
using SharedLoadIteratorMixed = cutlass::epilogue::threadblock::SharedLoadIteratorMixed<
ThreadMap,
float,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using SharedLoadIteratorNotMixed = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
float
>;
using SharedLoadIterator = typename platform::conditional<
(ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4),
SharedLoadIteratorNotMixed,
SharedLoadIteratorMixed>::type;
static int const kFragmentsPerIteration = 1;
};
} // namespace detail
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess,
bool ScatterD = false,
typename PermuteDLayout = layout::NoPermute,
conv::StrideSupport StrideSupport = conv::StrideSupport::kUnity,
int Rank = 4
>
struct DefaultEpilogueTensorOp {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
static conv::StrideSupport const kStrideSupport = StrideSupport;
static int const kRank = Rank;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
static bool const UseCUDAStore = platform::is_same<ElementOutput, double>::value;
using PackedOutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
OutputTileThreadMap,
ElementOutput,
ScatterD,
PermuteDLayout,
UseCUDAStore
>;
using StridedOutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorConv<
OutputTileThreadMap,
ElementOutput,
ScatterD,
PermuteDLayout,
UseCUDAStore,
kRank
>;
using OutputTileIterator = typename platform::conditional<StrideSupport == cutlass::conv::StrideSupport::kUnity,
PackedOutputTileIterator,
StridedOutputTileIterator>::type;
using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value,
cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC>,
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC> >::type;
/// Support several implementations depending on structure of epilogue
using DefaultIterators = detail::DefaultIteratorsTensorOp<
ElementOutput,
ElementAccumulator,
kElementsPerAccess,
Shape,
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename OutputTileThreadMap::CompactedThreadMap
>;
using WarpTileIterator = typename DefaultIterators::WarpTileIterator;
using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>;
static int const kFragmentsPerIteration = (kPartitionsK == 1 ? DefaultIterators::kFragmentsPerIteration : 1);
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding,
kFragmentsPerIteration
>;
};
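// Data path assembled by this default (descriptive summary): accumulators are walked by
// AccumulatorFragmentIterator, staged to shared memory by WarpTileIterator, reloaded in the
// output tile's thread mapping by SharedLoadIterator, transformed by OutputOp, and written
// to global memory by OutputTileIterator.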
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpilogueTensorOpStridedDgrad {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorStridedDgrad<
OutputTileThreadMap,
ElementOutput
>;
using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value,
cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC>,
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC> >::type;
/// Support several implementations depending on structure of epilogue
using DefaultIterators = detail::DefaultIteratorsTensorOp<
ElementOutput,
ElementAccumulator,
kElementsPerAccess,
Shape,
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename OutputTileThreadMap::CompactedThreadMap
>;
using WarpTileIterator = typename DefaultIterators::WarpTileIterator;
using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>;
static int const kFragmentsPerIteration = (kPartitionsK == 1 ? DefaultIterators::kFragmentsPerIteration : 1);
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding,
kFragmentsPerIteration
>;
};
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
int Rank,
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpilogueTensorOpAffineRankN {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankN<
OutputTileThreadMap,
ElementOutput,
Rank
>;
// Map to the row-major fragment iterator since the iterator selection for affine rank-N is the same as for row-major.
using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value,
cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
layout::RowMajor>,
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
layout::RowMajor> >::type;
/// Support several implementations depending on structure of epilogue
using DefaultIterators = detail::DefaultIteratorsTensorOp<
ElementOutput,
ElementAccumulator,
kElementsPerAccess,
Shape,
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename OutputTileThreadMap::CompactedThreadMap
>;
using WarpTileIterator = typename DefaultIterators::WarpTileIterator;
using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>;
static int const kFragmentsPerIteration = (kPartitionsK == 1 ? DefaultIterators::kFragmentsPerIteration : 1);
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding,
kFragmentsPerIteration
>;
};
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps which use an
/// interleaved output layout. For this case, shared memory is not needed.
template <typename Shape_, typename WarpMmaTensorOp_, int PartitionsK,
typename OutputOp_, int ElementsPerAccess, int InterleavedK,
bool isSplitK = false>
struct DefaultInterleavedEpilogueTensorOp {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::
DefaultInterleavedThreadMapTensorOp<
Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput,
kElementsPerAccess, InterleavedK>::Type;
using OutputTileIterator =
cutlass::epilogue::threadblock::InterleavedPredicatedTileIterator<
OutputTileThreadMap, ElementOutput, InterleavedK>;
using AccumulatorFragmentIterator =
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC>;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::InterleavedEpilogue<
Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator,
AccumulatorFragmentIterator, OutputOp, InterleavedK>;
};
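// A minimal instantiation sketch (illustrative only; `WarpMma` and the exact shapes below are
// placeholders assumed to be defined elsewhere, not taken from this header):
//
//   using InterleavedEpilogue = typename cutlass::epilogue::threadblock::
//       DefaultInterleavedEpilogueTensorOp<
//           cutlass::gemm::GemmShape<128, 128, 64>,  // threadblock tile
//           WarpMma,                                 // warp-level tensor op MMA
//           1,                                       // kPartitionsK
//           cutlass::epilogue::thread::LinearCombinationClamp<int8_t, 8, int32_t, float>,
//           8,                                       // elements per access
//           32                                       // InterleavedK
//       >::Epilogue;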
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps which use an
/// interleaved output layout. For this case, shared memory is not needed.
template <typename Shape_, typename WarpMmaTensorOp_, int PartitionsK,
typename OutputOp_, int ElementsPerAccess, int InterleavedK,
bool isSplitK = false>
struct DefaultInterleavedConvEpilogue {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::
DefaultInterleavedConvThreadMapTensorOp<
Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput,
kElementsPerAccess, InterleavedK>::Type;
using OutputTileIterator =
cutlass::epilogue::threadblock::InterleavedConvPredicatedTileIterator<
OutputTileThreadMap, ElementOutput, InterleavedK>;
using AccumulatorFragmentIterator =
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
// can reuse the gemm version here to do element selection
layout::ColumnMajorInterleaved<InterleavedK>>;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::InterleavedEpilogue<
Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator,
AccumulatorFragmentIterator, OutputOp, InterleavedK>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/default_epilogue_tensor_op.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/default_epilogue_tensor_op.h",
"repo_id": "include",
"token_count": 10917
} | 21 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs and convolution using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename OutputOp_ ///< Output operator
>
class EpilogueDirectStore {
public:
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
using WarpShape = typename WarpMmaOperator_::Shape;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using OutputOp = OutputOp_;
using Padding = MatrixShape<0, 0>;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount = gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
kPartitionsK
>;
/// Use this to control the granularity of one epilogue 'iteration'
static int const kFragmentsPerIteration = 1;
static int constexpr kSmemTiles = 1;
static int constexpr kSmemPointerOffset = 0;
/// Shared storage allocation needed by the epilogue
struct SharedStorage { } ;
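  // The direct-store epilogue writes accumulators straight from registers to global memory,
  // so no shared-memory staging buffer is required and SharedStorage is intentionally empty.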
private:
  // Assume the accumulator tile is a multiple of interleaved 32x32 tiles.
static int const kElementsPerPartial = 4;
using EleShapePerPatial = typename platform::conditional<
platform::is_same<ElementAccumulator, float>::value,
MatrixShape<2, 2>,
MatrixShape<1, 4> >::type;
static int const kElementsPerMma = 8;
static int const kAccumulatorPatials = 2;
using QuadShapePerPatialMma = MatrixShape<4, 4>;
static_assert(OutputOp::kCount >= 2,
"The direct store epilogue for Tensor Ops requires the output functor have kCount >= 2.");
private:
LongIndex warp_offset;
int thread_idx;
int warp_idx;
int lane_idx;
int warp_m, warp_n; // warp coordinates within a cta
int tid_m, tid_n; // thread coordinates within a warp
public:
/// Constructor
CUTLASS_DEVICE
EpilogueDirectStore(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx_, ///< ID of a thread within the threadblock
int warp_idx_, ///< ID of warp within threadblock
int lane_idx_ ///< Id of thread within warp
):
thread_idx(thread_idx_),
warp_idx(warp_idx_),
lane_idx(lane_idx_)
{
// warp offsetting calculations
warp_offset = warp_idx * WarpShape::kM * WarpShape::kN;
    int warp_id_mn = warp_idx % (WarpCount::kM * WarpCount::kN);
warp_m = warp_id_mn % WarpCount::kM;
warp_n = warp_id_mn / WarpCount::kM;
MatrixCoord warp_offset_coord(warp_m*WarpShape::kM, warp_n*WarpShape::kN);
// thread offsetting calculations
int quad = (lane_idx >> 2);
int lane_in_quad = (lane_idx & 3);
    // Each quad of four lanes owns one row of the tensor op accumulator tile, and each lane
    // within a quad owns two adjacent columns; the quad index selects the row and
    // 2 * lane_in_quad the starting column for this thread.
tid_m = quad;
tid_n = 2 * lane_in_quad;
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
    OutputTileIterator source_iterator) {         ///< Tile iterator for source tensor in global memory
if (!output_op.is_source_needed()) {
compute_source_not_needed_(output_op, destination_iterator, accumulators);
}
else {
compute_source_needed_(output_op, destination_iterator, accumulators, source_iterator);
}
}
private:
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
    OutputTileIterator source_iterator) {         ///< Tile iterator for source tensor in global memory
const int kAccumBlockN = 2;
const int kThreadsM = 8;
const int kThreadsN = 4;
const int kBlockM = WarpShape::kM / kThreadsM;
/// Array type used to output
using OutputAccessType = AlignedArray<ElementOutput, kAccumBlockN>;
/// Array type passed to the output operator - unused elements are optimized away
using OutputFragmentType = Array<ElementOutput, OutputOp::kCount>;
/// Array type used by output functor
using AccumulatorAccessType = Array<ElementAccumulator, kAccumBlockN>;
/// Array type used by output functor
using AccumulatorFragmentType = Array<ElementAccumulator, OutputOp::kCount>;
AccumulatorAccessType const *accumulator_pair = reinterpret_cast<AccumulatorAccessType const *>(&accumulators);
CUTLASS_PRAGMA_UNROLL
for (int accum_m_idx = 0; accum_m_idx < WarpShape::kM / kThreadsM; accum_m_idx++) {
int accum_m = kThreadsM * accum_m_idx;
int mL = destination_iterator.threadblock_offset.row() + WarpShape::kM * warp_m + tid_m + accum_m;
int nL_base = destination_iterator.threadblock_offset.column() + WarpShape::kN * warp_n + tid_n;
ElementOutput *output_ptr = destination_iterator.pointer + mL * destination_iterator.stride;
ElementOutput *source_ptr = source_iterator.pointer + mL * source_iterator.stride;
int const kIterationsN = WarpShape::kN / kThreadsN / kAccumBlockN;
CUTLASS_PRAGMA_UNROLL
for (int accum_n_idx = 0; accum_n_idx < kIterationsN; accum_n_idx++) {
int accum_idx = accum_m_idx + kBlockM * accum_n_idx;
int accum_n = kThreadsM * accum_n_idx;
// mL and nL are logical coordinate in 2D mapping of epilogue's 4D output
int nL = nL_base + accum_n;
bool guard = (mL < destination_iterator.extent.row()) && (nL < destination_iterator.extent.column());
AccumulatorFragmentType accum_fragment;
reinterpret_cast<AccumulatorAccessType &>(accum_fragment) = accumulator_pair[accum_idx];
OutputFragmentType output_fragment;
if(guard) {
reinterpret_cast<OutputAccessType &>(output_fragment) =
*reinterpret_cast<OutputAccessType const *>(source_ptr + nL);
}
// Perform output operator
output_fragment = output_op(accum_fragment, output_fragment);
if(guard) {
// Store
*reinterpret_cast<OutputAccessType *>(output_ptr + nL) = reinterpret_cast<OutputAccessType const &>(output_fragment);
}
}
}
}
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_not_needed_(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
    AccumulatorTile const &accumulators) {        ///< Complete warp-level accumulator tile
const int kAccumBlockN = 2;
const int kThreadsM = 8;
const int kThreadsN = 4;
const int kBlockM = WarpShape::kM / kThreadsM;
/// Array type used to output
using OutputAccessType = AlignedArray<ElementOutput, kAccumBlockN>;
/// Array type passed to the output operator - unused elements are optimized away
using OutputFragmentType = Array<ElementOutput, OutputOp::kCount>;
/// Array type used by output functor
using AccumulatorAccessType = Array<ElementAccumulator, kAccumBlockN>;
/// Array type used by output functor
using AccumulatorFragmentType = Array<ElementAccumulator, OutputOp::kCount>;
AccumulatorAccessType const *accumulator_pair = reinterpret_cast<AccumulatorAccessType const *>(&accumulators);
CUTLASS_PRAGMA_UNROLL
for (int accum_m_idx = 0; accum_m_idx < WarpShape::kM / kThreadsM; accum_m_idx++) {
int accum_m = kThreadsM * accum_m_idx;
int mL = destination_iterator.threadblock_offset.row() + WarpShape::kM * warp_m + tid_m + accum_m;
int nL_base = destination_iterator.threadblock_offset.column() + WarpShape::kN * warp_n + tid_n;
ElementOutput *output_ptr = destination_iterator.pointer + mL * destination_iterator.stride;
int const kIterationsN = WarpShape::kN / kThreadsN / kAccumBlockN;
CUTLASS_PRAGMA_UNROLL
for (int accum_n_idx = 0; accum_n_idx < kIterationsN; accum_n_idx++) {
int accum_idx = accum_m_idx + kBlockM * accum_n_idx;
int accum_n = kThreadsM * accum_n_idx;
// mL and nL are logical coordinate in 2D mapping of epilogue's 4D output
int nL = nL_base + accum_n;
bool guard = (mL < destination_iterator.extent.row()) && (nL < destination_iterator.extent.column());
AccumulatorFragmentType accum_fragment;
reinterpret_cast<AccumulatorAccessType &>(accum_fragment) = accumulator_pair[accum_idx];
OutputFragmentType output_fragment;
// Perform output operator
output_fragment = output_op(accum_fragment);
if(guard) {
// Store
*reinterpret_cast<OutputAccessType *>(output_ptr + nL) =
reinterpret_cast<OutputAccessType const &>(output_fragment);
}
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/epilogue_direct_store.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/epilogue_direct_store.h",
"repo_id": "include",
"token_count": 4855
} | 22 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
    \brief Defines classes for using 8-bit floating-point types (FP8 E4M3 and E5M2) in host or
    device code.
*/
#pragma once
// FP8 types are available starting CUDA 11.8+
#if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8))
#define CUDA_FP8_ENABLED 1
#endif
#if defined(__CUDA_ARCH__)
# if (__CUDA_ARCH__ >= 900)
# if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8))
# define CUDA_PTX_FP8_CVT_ENABLED 1
# endif // (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8))
# elif (__CUDA_ARCH__ == 890)
# if (__CUDACC_VER_MAJOR__ > 12) || ((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ >= 1))
# define CUDA_PTX_FP8_CVT_ENABLED 1
# endif // (__CUDACC_VER_MAJOR__ > 12) || ((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ >= 1))
# endif // (__CUDA_ARCH__ >= 900)
#endif // defined(__CUDA_ARCH__)
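// In summary: hardware FP8 conversion instructions are used on SM90 with CUDA 11.8+ and on
// SM89 with CUDA 12.1+; all other configurations fall back to the software conversion paths
// implemented below.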
#ifdef __GNUC__
// Ignore checks on reinterpret-casts that are being used for bitcasts.
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__CUDACC_RTC__)
#include "cutlass/floating_point_nvrtc.h"
#else
//
// Standard Library headers belong here to avoid conflicts with NVRTC.
//
#include <cmath>
#include <limits>
#include <cstdint>
#include <cstring>
#endif
#ifdef CUDA_FP8_ENABLED
#include <cuda_fp8.h>
#endif
#include <cuda_fp16.h>
#include "cutlass/cutlass.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// FP8 Has 2 encodings possible : E4M3 and E5M2
//
// E4M3 : 7 | 6 5 4 3 | 2 1 0
// E5M2 : 7 | 6 5 4 3 2 | 1 0
//
///////////////////////////////////////////////////////////////////////////////////////////////////
enum class FloatEncoding {
E4M3,
E5M2
};
template<FloatEncoding T>
struct alignas(1) float8_base {
static constexpr bool IS_E4M3 = (T == FloatEncoding::E4M3);
static constexpr bool IS_E5M2 = (T == FloatEncoding::E5M2);
// Number of Bits representing mantissa and exponents
static constexpr int FP32_NUM_BITS = 32;
static constexpr int FP32_NUM_EXPONENT_BITS = 8;
static constexpr int FP32_NUM_MANTISSA_BITS = 23;
static constexpr uint32_t FP32_NAN = 0x7fffffff;
static constexpr uint32_t FP32_INFINITY_MASK = 0x7f800000;
static constexpr int FP32_MAX_EXPONENT = 127;
static constexpr int FP32_MIN_EXPONENT = -126;
static constexpr int FP32_EXPONENT_BIAS = 127;
static constexpr int FP16_NUM_BITS = 16;
static constexpr int FP16_NUM_EXPONENT_BITS = 5;
static constexpr int FP16_NUM_MANTISSA_BITS = 10;
static constexpr uint16_t FP16_NAN = 0x7fff;
static constexpr uint16_t FP16_INFINITY_MASK = 0x7c00;
static constexpr int FP16_MAX_EXPONENT = 15;
static constexpr int FP16_MIN_EXPONENT = -14;
static constexpr int FP16_EXPONENT_BIAS = 15;
static constexpr int FP8_NUM_BITS = 8;
static constexpr int FP8_NUM_EXPONENT_BITS = IS_E4M3 ? 4 : 5;
static constexpr int FP8_NUM_MANTISSA_BITS = IS_E4M3 ? 3 : 2;
static constexpr uint8_t FP8_NAN = 0x7f; // Also F8_INF
static constexpr uint8_t FP8_INFINITY_MASK = IS_E4M3 ? 0x78 : 0x7c;
static constexpr int FP8_MAX_EXPONENT = IS_E4M3 ? 7 : 15;
static constexpr int FP8_MIN_EXPONENT = IS_E4M3 ? -6 : -14;
static constexpr int FP8_EXPONENT_BIAS = IS_E4M3 ? 7 : 15;
static constexpr uint8_t FP8_EXPONENT_MASK = (1 << FP8_NUM_EXPONENT_BITS) - 1;
static constexpr uint8_t FP8_MANTISSA_MASK = (1 << FP8_NUM_MANTISSA_BITS) - 1;
static constexpr uint8_t FP8_MAX_FLT = (IS_E4M3 ? 0x7e : 0x7b);
// 256 in float
static constexpr uint32_t FP8_SAT_VAL_FP32 = 0x43800000;
//
// Data members
//
/// Data container
uint8_t storage;
/// Ctors.
CUTLASS_HOST_DEVICE
float8_base() : storage(0) { }
/// Is finite implementation
CUTLASS_HOST_DEVICE
static bool isfinite(float flt) {
uint32_t s;
#if defined(__CUDA_ARCH__)
s = reinterpret_cast<uint32_t const &>(flt);
#else
std::memcpy(&s, &flt, sizeof(s));
#endif
return (s & 0x7f800000) < 0x7f800000;
}
/// Is NaN implementation
CUTLASS_HOST_DEVICE
static bool isnan(float flt) {
uint32_t s;
#if defined(__CUDA_ARCH__)
s = reinterpret_cast<uint32_t const &>(flt);
#else
std::memcpy(&s, &flt, sizeof(s));
#endif
return (s & 0x7fffffff) > 0x7f800000;
}
/// Is infinite implementation
CUTLASS_HOST_DEVICE
static bool isinf(float flt) {
uint32_t s;
#if defined(__CUDA_ARCH__)
s = reinterpret_cast<uint32_t const &>(flt);
#else
std::memcpy(&s, &flt, sizeof(s));
#endif
// Sign = 0 for +inf, 1 for -inf
// Exponent = all ones
// Mantissa = all zeros
return (s == 0x7f800000) || (s == 0xff800000);
}
/// FP32 -> FP8 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static uint8_t convert_float_to_fp8(float const& flt) {
// software implementation rounds toward nearest even
uint32_t s;
#if defined(__CUDA_ARCH__)
s = reinterpret_cast<uint32_t const &>(flt);
#else
std::memcpy(&s, &flt, sizeof(s));
#endif
// Extract the bits in the FP32 type
uint8_t sign = uint8_t((s >> 24 & 0x80));
int32_t exp = int32_t((s >> FP32_NUM_MANTISSA_BITS) & 0xff) - FP32_EXPONENT_BIAS;
int mantissa = s & 0x7fffff;
uint8_t u = 0;
uint8_t const kF8_NaN = 0x7f;
// NaN => NaN
if (isnan(flt)) {
return kF8_NaN;
}
// Inf => MAX_FLT (satfinite)
if (isinf(flt)) {
return sign | FP8_MAX_FLT;
}
// Special handling
if (exp == -128) {
// int8 range is from -128 to 127
// So 255(inf) - 127(bias) = 128 - will show up as -128
// satfinite
return (sign | FP8_MAX_FLT);
}
int sticky_bit = 0;
bool skip_sign = false;
bool may_be_nan = false;
if ( (exp >= FP8_MIN_EXPONENT) && (exp <= FP8_MAX_EXPONENT) ) {
// normal fp32 to normal fp8
exp = exp + FP8_EXPONENT_BIAS;
u = uint8_t((uint32_t(exp) & FP8_EXPONENT_MASK) << FP8_NUM_MANTISSA_BITS);
u = uint8_t(u | (mantissa >> (FP32_NUM_MANTISSA_BITS - FP8_NUM_MANTISSA_BITS)));
} else if(exp < FP8_MIN_EXPONENT) {
// normal single-precision to subnormal float8-precision representation
int rshift = (FP8_MIN_EXPONENT - exp);
if (rshift < FP32_NUM_BITS) {
mantissa |= (1 << FP32_NUM_MANTISSA_BITS);
sticky_bit = ((mantissa & ((1 << rshift) - 1)) != 0);
mantissa = (mantissa >> rshift);
u = (uint8_t(mantissa >> (FP32_NUM_MANTISSA_BITS- FP8_NUM_MANTISSA_BITS)) & FP8_MANTISSA_MASK);
} else {
mantissa = 0;
u = 0;
}
// Exponent > FP8_MAX_EXPONENT - this is a special case done to match HW
// 0x4380_0000 to 0x43e0_0000 - maps from 256 to 448, and does not saturate / inf.
} else {
if( exp == (FP8_MAX_EXPONENT + 1) ) {
uint8_t mantissa_tmp = uint8_t(mantissa >> (FP32_NUM_MANTISSA_BITS - FP8_NUM_MANTISSA_BITS));
if( mantissa_tmp < FP8_MANTISSA_MASK) {
exp = exp + FP8_EXPONENT_BIAS;
u = uint8_t(uint32_t(exp) << FP8_NUM_MANTISSA_BITS) | mantissa_tmp;
may_be_nan = (mantissa_tmp == (FP8_MANTISSA_MASK-1));
} else {
// satfinite
return (sign | FP8_MAX_FLT);
}
} else{
// satfinite
return (sign | FP8_MAX_FLT);
}
}
// round to nearest even
int NUM_BITS_SHIFT = FP32_NUM_MANTISSA_BITS - (FP8_NUM_MANTISSA_BITS + 1);
int round_bit = ((mantissa >> NUM_BITS_SHIFT) & 1);
sticky_bit |= ((mantissa & ((1 << NUM_BITS_SHIFT) - 1)) != 0);
if ((round_bit && sticky_bit) || (round_bit && (u & 1))) {
u = uint8_t(u + 1);
if( may_be_nan ) {
skip_sign = true;
}
}
if (u > FP8_MAX_FLT) {
// satfinite
u = (sign | FP8_MAX_FLT);
}
if( ! skip_sign ) {
u |= sign;
}
return u;
}
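  // Worked encoding examples for the software path above: 1.0f has an unbiased exponent of 0,
  // so it encodes as 0x38 in E4M3 (exponent 0 + 7, mantissa 0) and 0x3c in E5M2 (exponent
  // 0 + 15, mantissa 0). The largest finite magnitudes are 448 (0x7e) for E4M3 and
  // 57344 (0x7b) for E5M2; larger inputs saturate to these values (satfinite).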
/// Converts a fp8 value stored as a uint8_t to a float
CUTLASS_HOST_DEVICE
static float convert_fp8_to_float(uint8_t const& x) {
uint32_t constexpr kF32_NaN = 0x7fffffff;
uint8_t const &f8 = x;
uint32_t sign = (f8 >> (FP8_NUM_BITS - 1)) & 1;
uint32_t exp = (f8 >> FP8_NUM_MANTISSA_BITS) & FP8_EXPONENT_MASK;
uint32_t mantissa = f8 & FP8_MANTISSA_MASK;
unsigned f = (sign << (FP32_NUM_BITS-1));
if (IS_E4M3 && exp == 15 && mantissa == 0x7) {
f = kF32_NaN;
}
else if (exp > 0 && (IS_E4M3 || exp < (FP8_MAX_EXPONENT + FP8_EXPONENT_BIAS + 1))) {
// normal
exp += (FP32_EXPONENT_BIAS - FP8_EXPONENT_BIAS);
f = f |
(exp << FP32_NUM_MANTISSA_BITS) |
(mantissa << (FP32_NUM_MANTISSA_BITS-FP8_NUM_MANTISSA_BITS));
} else if (exp == 0) {
if (mantissa) {
// subnormal
exp += (FP32_EXPONENT_BIAS - FP8_EXPONENT_BIAS) + 1;
while ((mantissa & (1 << FP8_NUM_MANTISSA_BITS)) == 0) {
mantissa <<= 1;
exp--;
}
mantissa &= FP8_MANTISSA_MASK;
f = f |
(exp << FP32_NUM_MANTISSA_BITS) |
(mantissa << (FP32_NUM_MANTISSA_BITS-FP8_NUM_MANTISSA_BITS));
} else {
// sign-preserving zero
}
} else {
if(mantissa == 0){
// Sign-preserving infinity
f = (f | 0x7f800000);
} else {
// Canonical NaN
f = kF32_NaN;
}
}
#if defined(__CUDA_ARCH__)
return reinterpret_cast<float const&>(f);
#else
float flt;
std::memcpy(&flt, &f, sizeof(flt));
return flt;
#endif
}
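  // Worked decoding examples: 0x38 decodes to 1.0f under E4M3 and 0x3c decodes to 1.0f under
  // E5M2. The smallest positive subnormal, 0x01, decodes to 2^-9 (~1.95e-3) for E4M3 and
  // 2^-16 (~1.53e-5) for E5M2.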
};
// Forward declaration of float_e5m2_t to define float_e4m3_t <=> float_e5m2_t
// conversions in class float_e4m3_t
struct float_e5m2_t;
///////////////////////////////////////////////////////////////
///
/// floating-point 8 type : E4M3
///
///////////////////////////////////////////////////////////////
struct alignas(1) float_e4m3_t : float8_base<FloatEncoding::E4M3> {
using Base = float8_base<FloatEncoding::E4M3>;
static constexpr int MAX_EXPONENT = Base::FP8_MAX_EXPONENT;
//
// Static conversion operators
//
/// Constructs from an uint8_t
CUTLASS_HOST_DEVICE
static float_e4m3_t bitcast(uint8_t x) {
float_e4m3_t f;
f.storage = x;
return f;
}
/// FP32 -> FP8 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static float_e4m3_t from_float(float const& flt) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t tmp;
float y = float();
asm volatile("cvt.rn.satfinite.e4m3x2.f32 %0, %1, %2;" : "=h"(tmp) : "f"(y), "f"(flt));
return *reinterpret_cast<float_e4m3_t *>(&tmp);
#else
return bitcast(Base::convert_float_to_fp8(flt));
#endif
}
  /// FP16 -> E4M3 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static float_e4m3_t from_half(half const& flt) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t tmp = 0;
uint32_t bits = reinterpret_cast<uint16_t const &>(flt);
asm volatile("cvt.rn.satfinite.e4m3x2.f16x2 %0, %1;" : "=h"(tmp) : "r"(bits));
return *reinterpret_cast<float_e4m3_t *>(&tmp);
#else
return bitcast(Base::convert_float_to_fp8(__half2float(flt)));
#endif
}
// E4M3 -> half
CUTLASS_HOST_DEVICE
static half to_half(float_e4m3_t const& x) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t bits = x.storage;
uint32_t packed;
asm volatile("cvt.rn.f16x2.e4m3x2 %0, %1;\n" : "=r"(packed) : "h"(bits));
return reinterpret_cast<half2 const &>(packed).x;
#else
return __float2half(Base::convert_fp8_to_float(x.storage));
#endif
}
// E4M3 -> Float
CUTLASS_HOST_DEVICE
static float to_float(float_e4m3_t const& x) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t bits = x.storage;
uint32_t packed;
asm volatile("cvt.rn.f16x2.e4m3x2 %0, %1;\n" : "=r"(packed) : "h"(bits));
return __half2float(reinterpret_cast<half2 const &>(packed).x);
#else
return Base::convert_fp8_to_float(x.storage);
#endif
}
//
// Methods
//
/// Constructor inheritance
using Base::Base;
/// Default constructor
float_e4m3_t() = default;
#ifdef CUDA_FP8_ENABLED
/// Conversion from CUDA's FP8 type
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(__nv_fp8_e4m3 x) {
storage = x.__x;
}
#endif
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(float x) {
storage = from_float(x).storage;
}
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(half x) {
storage = from_half(x).storage;
}
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(double x): float_e4m3_t(float(x)) {
}
/// Integer conversion
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(int x): float_e4m3_t(float(x)) {
}
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(unsigned x): float_e4m3_t(float(x)) {
}
/// E5M2 conversion. Defined after float_e5m2_t is defined.
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(float_e5m2_t x);
#ifdef CUDA_FP8_ENABLED
/// Assignment from CUDA's FP8 type
CUTLASS_HOST_DEVICE
float_e4m3_t & operator=(__nv_fp8_e4m3 x) {
storage = x.__x;
return *this;
}
#endif
/// Converts to float
CUTLASS_HOST_DEVICE
operator float() const {
return to_float(*this);
}
/// Converts to half
CUTLASS_HOST_DEVICE
operator half() const {
return to_half(*this);
}
/// Converts to float
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(to_float(*this));
}
/// Converts to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
#if defined(__CUDA_ARCH__)
return __half2int_rn(to_half(*this));
#else
return int(to_float(*this));
#endif
}
/// Casts to bool
CUTLASS_HOST_DEVICE
explicit operator bool() const {
#if defined(__CUDA_ARCH__)
return bool(__half2int_rn(to_half(*this)));
#else
return bool(int(to_float(*this)));
#endif
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint8_t& raw() {
return storage;
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint8_t raw() const {
return storage;
}
/// Returns the sign bit
CUTLASS_HOST_DEVICE
bool signbit() const {
return ((storage & (1 << (Base::FP8_NUM_BITS - 1))) != 0);
}
/// Returns the biased exponent
CUTLASS_HOST_DEVICE
int exponent_biased() const {
return int((storage >> FP8_NUM_MANTISSA_BITS) & Base::FP8_EXPONENT_MASK);
}
/// Returns the unbiased exponent
CUTLASS_HOST_DEVICE
int exponent() const {
    return exponent_biased() - Base::FP8_EXPONENT_BIAS;
}
/// Returns the mantissa
CUTLASS_HOST_DEVICE
int mantissa() const {
return int(storage & Base::FP8_MANTISSA_MASK);
}
};
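// Usage sketch (illustrative, host-side): arithmetic on float_e4m3_t promotes to float and
// converts back, e.g.
//
//   cutlass::float_e4m3_t a(1.5f);
//   cutlass::float_e4m3_t b(0.25f);
//   cutlass::float_e4m3_t c = a * b;   // computed in float, rounded back to E4M3
//   float f = c;                       // implicit conversion to float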
///////////////////////////////////////////////////////////////
///
/// floating-point 8 type : E5M2
///
///////////////////////////////////////////////////////////////
struct alignas(1) float_e5m2_t : float8_base<FloatEncoding::E5M2> {
using Base = float8_base<FloatEncoding::E5M2>;
static constexpr int MAX_EXPONENT = Base::FP8_MAX_EXPONENT;
//
// Static conversion operators
//
/// Constructs from an uint8_t
CUTLASS_HOST_DEVICE
static float_e5m2_t bitcast(uint8_t x) {
float_e5m2_t f;
f.storage = x;
return f;
}
/// FP32 -> FP8 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static float_e5m2_t from_float(float const& flt) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t tmp;
float y = float();
asm volatile("cvt.rn.satfinite.e5m2x2.f32 %0, %1, %2;" : "=h"(tmp) : "f"(y), "f"(flt));
return *reinterpret_cast<float_e5m2_t *>(&tmp);
#else
return bitcast(Base::convert_float_to_fp8(flt));
#endif
}
/// FP16 -> E5M2 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static float_e5m2_t from_half(half const& flt) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t tmp = 0;
uint32_t bits = reinterpret_cast<uint16_t const &>(flt);
asm volatile("cvt.rn.satfinite.e5m2x2.f16x2 %0, %1;" : "=h"(tmp) : "r"(bits));
return *reinterpret_cast<float_e5m2_t *>(&tmp);
#else
return bitcast(Base::convert_float_to_fp8(__half2float(flt)));
#endif
}
// E5M2 -> half
CUTLASS_HOST_DEVICE
static half to_half(float_e5m2_t const& x) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t bits = x.storage;
uint32_t packed;
asm volatile("cvt.rn.f16x2.e5m2x2 %0, %1;\n" : "=r"(packed) : "h"(bits));
return reinterpret_cast<half2 const &>(packed).x;
#else
return __float2half(Base::convert_fp8_to_float(x.storage));
#endif
}
// E5M2 -> Float
CUTLASS_HOST_DEVICE
static float to_float(float_e5m2_t const& x) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t bits = x.storage;
uint32_t packed;
asm volatile("cvt.rn.f16x2.e5m2x2 %0, %1;\n" : "=r"(packed) : "h"(bits));
return __half2float(reinterpret_cast<half2 const &>(packed).x);
#else
return Base::convert_fp8_to_float(x.storage);
#endif
}
//
// Methods
//
/// Constructor inheritance
using Base::Base;
/// Default constructor
float_e5m2_t() = default;
#ifdef CUDA_FP8_ENABLED
/// Conversion from CUDA's FP8 type
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(__nv_fp8_e5m2 x) {
storage = x.__x;
}
#endif
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(float x) {
storage = from_float(x).storage;
}
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(half x) {
storage = from_half(x).storage;
}
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(double x): float_e5m2_t(float(x)) {
}
/// Integer conversion
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(int x): float_e5m2_t(float(x)) {
}
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(unsigned x): float_e5m2_t(float(x)) {
}
/// E4M3 conversion
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(float_e4m3_t x);
#ifdef CUDA_FP8_ENABLED
/// Assignment from CUDA's FP8 type
CUTLASS_HOST_DEVICE
float_e5m2_t & operator=(__nv_fp8_e5m2 x) {
storage = x.__x;
return *this;
}
#endif
/// Converts to float
CUTLASS_HOST_DEVICE
operator float() const {
return to_float(*this);
}
/// Converts to half
CUTLASS_HOST_DEVICE
operator half() const {
return to_half(*this);
}
/// Converts to float
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(to_float(*this));
}
/// Converts to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
#if defined(__CUDA_ARCH__)
return __half2int_rn(to_half(*this));
#else
return int(to_float(*this));
#endif
}
/// Casts to bool
CUTLASS_HOST_DEVICE
explicit operator bool() const {
#if defined(__CUDA_ARCH__)
return bool(__half2int_rn(to_half(*this)));
#else
return bool(int(to_float(*this)));
#endif
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint8_t& raw() {
return storage;
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint8_t raw() const {
return storage;
}
/// Returns the sign bit
CUTLASS_HOST_DEVICE
bool signbit() const {
return ((storage & (1 << (Base::FP8_NUM_BITS - 1))) != 0);
}
/// Returns the biased exponent
CUTLASS_HOST_DEVICE
int exponent_biased() const {
return int((storage >> FP8_NUM_MANTISSA_BITS) & Base::FP8_EXPONENT_MASK);
}
/// Returns the unbiased exponent
CUTLASS_HOST_DEVICE
int exponent() const {
    return exponent_biased() - Base::FP8_EXPONENT_BIAS;
}
/// Returns the mantissa
CUTLASS_HOST_DEVICE
int mantissa() const {
return int(storage & Base::FP8_MANTISSA_MASK);
}
};
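// Note on the two encodings: E4M3 trades exponent range for precision (3 mantissa bits,
// maximum finite value 448, no infinity, NaN encoded as 0x7f/0xff), while E5M2 keeps an
// IEEE-like layout with infinities and a wider range (2 mantissa bits, maximum finite
// value 57344).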
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Arithmetic operators
//
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool operator==(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) == float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator!=(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) != float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) < float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<=(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) <= float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) > float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>=(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) >= float(rhs);
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator+(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float_e4m3_t(float(lhs) + float(rhs));
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator-(float_e4m3_t const& lhs) {
return float_e4m3_t(-float(lhs));
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator-(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float_e4m3_t(float(lhs) - float(rhs));
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator*(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float_e4m3_t(float(lhs) * float(rhs));
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator/(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float_e4m3_t(float(lhs) / float(rhs));
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator+=(float_e4m3_t & lhs, float_e4m3_t const& rhs) {
lhs = float_e4m3_t(float(lhs) + float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator-=(float_e4m3_t & lhs, float_e4m3_t const& rhs) {
lhs = float_e4m3_t(float(lhs) - float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator*=(float_e4m3_t & lhs, float_e4m3_t const& rhs) {
lhs = float_e4m3_t(float(lhs) * float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator/=(float_e4m3_t & lhs, float_e4m3_t const& rhs) {
lhs = float_e4m3_t(float(lhs) / float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator++(float_e4m3_t & lhs) {
float tmp(lhs);
++tmp;
lhs = float_e4m3_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator--(float_e4m3_t & lhs) {
float tmp(lhs);
--tmp;
lhs = float_e4m3_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator++(float_e4m3_t & lhs, int) {
float_e4m3_t ret(lhs);
float tmp(lhs);
tmp++;
lhs = float_e4m3_t(tmp);
return ret;
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator--(float_e4m3_t & lhs, int) {
float_e4m3_t ret(lhs);
float tmp(lhs);
tmp--;
lhs = float_e4m3_t(tmp);
return ret;
}
CUTLASS_HOST_DEVICE
bool operator==(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) == float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator!=(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) != float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) < float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<=(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) <= float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) > float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>=(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) >= float(rhs);
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator+(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float_e5m2_t(float(lhs) + float(rhs));
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator-(float_e5m2_t const& lhs) {
return float_e5m2_t(-float(lhs));
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator-(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float_e5m2_t(float(lhs) - float(rhs));
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator*(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float_e5m2_t(float(lhs) * float(rhs));
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator/(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float_e5m2_t(float(lhs) / float(rhs));
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator+=(float_e5m2_t & lhs, float_e5m2_t const& rhs) {
lhs = float_e5m2_t(float(lhs) + float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator-=(float_e5m2_t & lhs, float_e5m2_t const& rhs) {
lhs = float_e5m2_t(float(lhs) - float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator*=(float_e5m2_t & lhs, float_e5m2_t const& rhs) {
lhs = float_e5m2_t(float(lhs) * float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator/=(float_e5m2_t & lhs, float_e5m2_t const& rhs) {
lhs = float_e5m2_t(float(lhs) / float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator++(float_e5m2_t & lhs) {
float tmp(lhs);
++tmp;
lhs = float_e5m2_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator--(float_e5m2_t & lhs) {
float tmp(lhs);
--tmp;
lhs = float_e5m2_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator++(float_e5m2_t & lhs, int) {
float_e5m2_t ret(lhs);
float tmp(lhs);
tmp++;
lhs = float_e5m2_t(tmp);
return ret;
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator--(float_e5m2_t & lhs, int) {
float_e5m2_t ret(lhs);
float tmp(lhs);
tmp--;
lhs = float_e5m2_t(tmp);
return ret;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// float_e4m3_t <=> float_e5m2_t conversions
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// float_e4m3_t <= float_e5m2_t
CUTLASS_HOST_DEVICE
float_e4m3_t::float_e4m3_t(float_e5m2_t x) {
storage = from_float(float_e5m2_t::to_float(x)).storage;
}
/// float_e5m2_t <= float_e4m3_t
CUTLASS_HOST_DEVICE
float_e5m2_t::float_e5m2_t(float_e4m3_t x) {
storage = from_float(float_e4m3_t::to_float(x)).storage;
}
///////////////////////////////////////////////////////////////
///
/// Umbrella floating-point 8-bit data type : type_erased_dynamic_float8_t
/// This umbrella datatype can be used when a user provides a specific
/// datatype in the runtime argument list.
///
/// Currently supported runtime datatypes compatible with type_erased_dynamic_float8_t:
/// QMMAFormat::E5M2
/// QMMAFormat::E4M3
///
///////////////////////////////////////////////////////////////
union type_erased_dynamic_float8_t {
uint8_t data;
cutlass::float_e5m2_t e5m2;
cutlass::float_e4m3_t e4m3;
CUTLASS_HOST_DEVICE
explicit operator cutlass::float_e5m2_t() const {
return e5m2;
}
CUTLASS_HOST_DEVICE
explicit operator cutlass::float_e4m3_t() const {
return e4m3;
}
};
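// Usage sketch (illustrative; `raw_byte` is a hypothetical uint8_t supplied at runtime):
//
//   type_erased_dynamic_float8_t v;
//   v.data = raw_byte;
//   float as_e4m3 = float(cutlass::float_e4m3_t(v));  // reinterpret the byte as E4M3
//   float as_e5m2 = float(cutlass::float_e5m2_t(v));  // reinterpret the byte as E5M2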
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Standard Library operations and definitions
//
///////////////////////////////////////////////////////////////////////////////////////////////////
#if !defined(__CUDACC_RTC__)
namespace std {
/// Numeric limits common to all float8 types
template <typename T>
struct float8_base_numeric_limits {
private:
using F8Type = T;
public:
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
static std::float_denorm_style const has_denorm = std::denorm_present;
static bool const has_denorm_loss = true;
static std::float_round_style const round_style = std::round_to_nearest;
static bool const is_iec559 = false;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = F8Type::FP8_NUM_MANTISSA_BITS;
/// Least positive value
CUTLASS_HOST_DEVICE
static F8Type min() { return F8Type::bitcast(0x01); }
/// Maximum finite value
CUTLASS_HOST_DEVICE
static F8Type max() { return F8Type::bitcast(F8Type::FP8_MAX_FLT); }
/// Returns maximum rounding error
CUTLASS_HOST_DEVICE
static F8Type round_error() { return F8Type(0.5f); }
/// Returns positive infinity value
CUTLASS_HOST_DEVICE
static F8Type infinity() { return F8Type::bitcast(F8Type::FP8_INFINITY_MASK); }
/// Returns quiet NaN value
CUTLASS_HOST_DEVICE
static F8Type quiet_NaN() { return F8Type::bitcast(F8Type::FP8_NAN); }
/// Returns signaling NaN value
CUTLASS_HOST_DEVICE
static F8Type signaling_NaN() { return F8Type::bitcast(F8Type::FP8_NAN); }
/// Returns smallest positive subnormal value
CUTLASS_HOST_DEVICE
static F8Type denorm_min() { return F8Type::bitcast(0x01); }
};
/// Numeric limits for float_e4m3_t
template <>
struct numeric_limits<cutlass::float_e4m3_t> :
public float8_base_numeric_limits<cutlass::float_e4m3_t> {
static bool const has_infinity = false;
/// Minimum finite value
static cutlass::float_e4m3_t lowest() { return cutlass::float_e4m3_t::bitcast(0xfe); }
/// Machine epsilon, that is, the difference between 1.0 and the next representable value
static cutlass::float_e4m3_t epsilon() { return cutlass::float_e4m3_t::bitcast(0x20); }
};
/// Numeric limits for float_e5m2_t
template <>
struct numeric_limits<cutlass::float_e5m2_t> :
public float8_base_numeric_limits<cutlass::float_e5m2_t> {
static bool const has_infinity = true;
/// Minimum finite value
static cutlass::float_e5m2_t lowest() { return cutlass::float_e5m2_t::bitcast(0xfb); }
/// Machine epsilon, that is, the difference between 1.0 and the next representable value
static cutlass::float_e5m2_t epsilon() { return cutlass::float_e5m2_t::bitcast(0x34); }
};
} // namespace std
#endif
namespace cutlass {
namespace platform {
/// Numeric limits common to all float8 types
template <typename T>
struct float8_base_numeric_limits {
private:
using F8Type = T;
public:
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
#if !defined(__CUDACC_RTC__)
static std::float_denorm_style const has_denorm = std::denorm_present;
#endif
static bool const has_denorm_loss = true;
#if !defined(__CUDACC_RTC__)
static std::float_round_style const round_style = std::round_to_nearest;
#endif
static bool const is_iec559 = false;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = F8Type::FP8_NUM_MANTISSA_BITS;
/// Least positive value
CUTLASS_HOST_DEVICE
static F8Type min() { return F8Type::bitcast(0x01); }
/// Maximum finite value
CUTLASS_HOST_DEVICE
static F8Type max() { return F8Type::bitcast(F8Type::FP8_MAX_FLT); }
/// Returns maximum rounding error
CUTLASS_HOST_DEVICE
static F8Type round_error() { return F8Type(0.5f); }
/// Returns positive infinity value
CUTLASS_HOST_DEVICE
static F8Type infinity() { return F8Type::bitcast(F8Type::FP8_INFINITY_MASK); }
/// Returns quiet NaN value
CUTLASS_HOST_DEVICE
static F8Type quiet_NaN() { return F8Type::bitcast(F8Type::FP8_NAN); }
/// Returns signaling NaN value
CUTLASS_HOST_DEVICE
static F8Type signaling_NaN() { return F8Type::bitcast(F8Type::FP8_NAN); }
/// Returns smallest positive subnormal value
CUTLASS_HOST_DEVICE
static F8Type denorm_min() { return F8Type::bitcast(0x01); }
};
/// Forward Declaration
template <class T>
struct numeric_limits;
/// Numeric limits for float_e4m3_t
template <>
struct numeric_limits<cutlass::float_e4m3_t> :
public float8_base_numeric_limits<cutlass::float_e4m3_t> {
static bool const has_infinity = false;
/// Minimum finite value
static cutlass::float_e4m3_t lowest() { return cutlass::float_e4m3_t::bitcast(0xfe); }
/// Machine epsilon, that is, the difference between 1.0 and the next representable value
static cutlass::float_e4m3_t epsilon() { return cutlass::float_e4m3_t::bitcast(0x20); }
};
/// Numeric limits for float_e5m2_t
template <>
struct numeric_limits<cutlass::float_e5m2_t> :
public float8_base_numeric_limits<cutlass::float_e5m2_t> {
static bool const has_infinity = true;
/// Minimum finite value
static cutlass::float_e5m2_t lowest() { return cutlass::float_e5m2_t::bitcast(0xfb); }
/// Machine epsilon, that is, the difference between 1.0 and the next representable value
static cutlass::float_e5m2_t epsilon() { return cutlass::float_e5m2_t::bitcast(0x34); }
};
} // namespace platform
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// User-defined literals
//
CUTLASS_HOST_DEVICE
cutlass::float_e4m3_t operator "" _fe4m3(long double x) {
return cutlass::float_e4m3_t(float(x));
}
CUTLASS_HOST_DEVICE
cutlass::float_e4m3_t operator "" _fe4m3(unsigned long long int x) {
return cutlass::float_e4m3_t(int(x));
}
CUTLASS_HOST_DEVICE
cutlass::float_e5m2_t operator "" _fe5m2(long double x) {
return cutlass::float_e5m2_t(float(x));
}
CUTLASS_HOST_DEVICE
cutlass::float_e5m2_t operator "" _fe5m2(unsigned long long int x) {
return cutlass::float_e5m2_t(int(x));
}
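// Example: the literal suffixes above allow terse FP8 constants in host code, e.g.
//
//   auto x = 0.5_fe4m3;   // cutlass::float_e4m3_t
//   auto y = 2_fe5m2;     // cutlass::float_e5m2_t via the integer-literal overload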
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/float8.h/0 | {
"file_path": "include/cutlass/float8.h",
"repo_id": "include",
"token_count": 16671
} | 23 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/detail/dependent_false.hpp"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/detail/layout.hpp"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/transform/collective/sm90_wgmma_transpose.hpp"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/trace.h"
#include "cutlass/detail/collective.hpp"
#include "cute/arch/cluster_sm90.hpp"
#include "cute/arch/copy_sm90.hpp"
#include "cute/algorithm/functional.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/atom/copy_traits_sm90_tma.hpp"
#include "cute/algorithm/gemm.hpp"
#include "cute/tensor_predicate.hpp"
#include "cute/numeric/arithmetic_tuple.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::collective {
using namespace cute;
/////////////////////////////////////////////////////////////////////////////////////////////////
// WarpSpecialized Mainloop that source A operand from registers
template <
int Stages,
class ClusterShape,
class KernelSchedule,
class TileShape_,
class ElementAOptionalTuple,
class StrideA_,
class ElementBOptionalTuple,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_>
struct CollectiveMma<
MainloopSm90TmaGmmaRmemAWarpSpecializedMixedInput<Stages, ClusterShape, KernelSchedule>,
TileShape_,
ElementAOptionalTuple,
StrideA_,
ElementBOptionalTuple,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_>
{
private:
template <class PointerType>
static constexpr auto
get_logical_ptr(PointerType const* ptr) {
if constexpr (cute::sizeof_bits_v<PointerType> < 8) {
return subbyte_iterator<PointerType const>(ptr);
}
else {
return ptr;
}
}
enum class ConversionMode {
DirectConvert,
ConvertAndScale,
ConvertAndScaleWithZero
};
using ScaleA = detail::deduce_mixed_width_dtype_t<1, ElementAOptionalTuple>;
using ScaleB = detail::deduce_mixed_width_dtype_t<1, ElementBOptionalTuple>;
using ZeroA = detail::deduce_mixed_width_dtype_t<2, ElementAOptionalTuple>;
using ZeroB = detail::deduce_mixed_width_dtype_t<2, ElementBOptionalTuple>;
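  // In the aliases above, element 0 of the optional tuple is the operand type itself, element 1
  // the scale type, and element 2 the zero-point type; entries that are not provided deduce to
  // void (see the NonVoid* aliases below, which substitute a placeholder so the code compiles).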
public:
//
// Type Aliases
//
using DispatchPolicy = MainloopSm90TmaGmmaRmemAWarpSpecializedMixedInput<Stages, ClusterShape, KernelSchedule>;
using TileShape = TileShape_;
static_assert(cute::is_tuple<ElementAOptionalTuple>::value ^ cute::is_tuple<ElementBOptionalTuple>::value,
"Either A OR B must be a tuple. It must take the from {ElementOperand, [ElementScale],"
"[ElementZero]}. Inputs in [] are optional.");
using ElementA = detail::deduce_mixed_width_dtype_t<0, ElementAOptionalTuple>;
using ElementB = detail::deduce_mixed_width_dtype_t<0, ElementBOptionalTuple>;
static constexpr bool IsATransformed = cute::is_tuple<ElementAOptionalTuple>::value;
using ElementScale = cute::conditional_t<IsATransformed, ScaleA, ScaleB>;
using ElementZero = cute::conditional_t<IsATransformed, ZeroA, ZeroB>;
// For cases where we can't have a void type, we can use this to allow the code to compile when the scale / zero is void.
using NonVoidElementScale = cute::conditional_t<cute::is_void_v<ElementScale>, float, ElementScale>;
using NonVoidElementZero = cute::conditional_t<cute::is_void_v<ElementZero>, float, ElementZero>;
using StrideA = StrideA_;
using StrideB = StrideB_;
// These are always MN major
using StrideScale = cute::Stride<cute::Int<1>, int64_t, int64_t>;
// For cases where we can't have a void scale, we can use this to allow the code to compile when the scale is void.
using NonVoidStrideScale = cute::conditional_t<
cute::is_void_v<StrideScale>, cute::Stride<_1, int64_t, int64_t>, StrideScale>;
static_assert((IsATransformed && cutlass::gemm::detail::is_k_major<StrideA>()) ||
(!IsATransformed && cutlass::gemm::detail::is_k_major<StrideB>()),
"The transformed type must be K-major.");
static_assert(( IsATransformed && (sizeof(ElementB) == 2)) ||
(!IsATransformed && (sizeof(ElementA) == 2)) ||
(cutlass::gemm::detail::is_k_major<StrideA>() &&
cutlass::gemm::detail::is_k_major<StrideB>()),
"The unscaled element must be 2 bytes OR both inputs must be K-major");
static_assert(cutlass::gemm::detail::is_mn_major<NonVoidStrideScale>(),
"Scale must be MN major [Col Major if A is scaled, Row Major if B is scaled].");
using CtaShape_MNK = decltype(shape_div(TileShape{}, ClusterShape{}));
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using GmemTiledCopyScale = cute::SM90_TMA_LOAD;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
// Scale layout atom set after swapping.
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using SmemCopyAtomScale = Copy_Atom<cute::DefaultCopy, NonVoidElementScale>;
// We must ensure the type to be scaled goes to RF
static constexpr bool SwapAB = !IsATransformed;
using InternalSmemLayoutAtomA = cute::conditional_t<!SwapAB, SmemLayoutAtomA, SmemLayoutAtomB>;
using InternalSmemLayoutAtomB = cute::conditional_t<!SwapAB, SmemLayoutAtomB, SmemLayoutAtomA>;
using InternalSmemCopyAtomA = cute::conditional_t<!SwapAB, SmemCopyAtomA, SmemCopyAtomB>;
using InternalSmemCopyAtomB = cute::conditional_t<!SwapAB, SmemCopyAtomB, SmemCopyAtomA>;
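  // Example: when B carries the narrow (to-be-converted) operand, IsATransformed is false and
  // SwapAB is true, so the Internal* aliases exchange A and B; the converted operand is then
  // staged through registers while the other operand is sourced directly from shared memory.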
// TMA converts f32 input to tf32 when copying from GMEM to SMEM
// For all other types, cast to size equivalent uint type to avoid any rounding by TMA.
static constexpr bool ConvertF32toTF32A = cute::is_same_v<float, ElementA>;
static constexpr bool ConvertF32toTF32B = cute::is_same_v<float, ElementB>;
using ConvertedElementA = cute::conditional_t<ConvertF32toTF32A, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementA>>>;
using ConvertedElementB = cute::conditional_t<ConvertF32toTF32B, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementB>>>;
using RealInternalElementA = cute::conditional_t<!SwapAB, ElementA, ElementB>;
using RealInternalElementB = cute::conditional_t<!SwapAB, ElementB, ElementA>;
using InternalElementA = cute::conditional_t<!SwapAB, ConvertedElementA, ConvertedElementB>;
using InternalElementB = cute::conditional_t<!SwapAB, ConvertedElementB, ConvertedElementA>;
using InternalStrideA = cute::conditional_t<!SwapAB, StrideA, StrideB>;
using InternalStrideB = cute::conditional_t<!SwapAB, StrideB, StrideA>;
using TransformA = TransformA_;
using TransformB = TransformB_;
using InternalTransformA = cute::conditional_t<!SwapAB, TransformA, TransformB>;
using InternalTransformB = cute::conditional_t<!SwapAB, TransformB, TransformA>;
static constexpr int IsSubbyteA = cute::sizeof_bits_v<InternalElementA> < 8;
using TmaElementA = cute::conditional_t<IsSubbyteA, uint8_t, InternalElementA>;
using ArchTag = typename DispatchPolicy::ArchTag;
using MainloopPipeline = cutlass::PipelineTmaAsync<
DispatchPolicy::Stages>;
using PipelineState = cutlass::PipelineState<DispatchPolicy::Stages>;
using PipelineParams = typename MainloopPipeline::Params;
using SmemLayoutAtomScale = Layout<Shape<decltype(cute::shape<0>(InternalSmemLayoutAtomA{})), cute::Int<1>>>;
using ScaleTileShape = decltype(make_shape(shape<0>(TileShape{}), shape<1>(SmemLayoutAtomScale{})));
static_assert(cute::rank(InternalSmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(InternalSmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(InternalSmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(InternalSmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(InternalSmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(InternalSmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(rank(SmemLayoutAtomScale{}) == 2, "SmemLayoutAtomScale must be rank 2");
static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomScale{})) == 0, "SmemLayoutAtomScale must evenly divide tile m shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomScale{})) == 0, "SmemLayoutAtomScale must evenly divide tile k shape.");
// Tile along modes in a way that maximizes the TMA box size.
using SmemLayoutA = decltype(tile_to_shape(
InternalSmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}),
cute::conditional_t< ::cutlass::gemm::detail::is_major<0,InternalStrideA>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{}));
using SmemLayoutB = decltype(tile_to_shape(
InternalSmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}),
cute::conditional_t< ::cutlass::gemm::detail::is_major<0,InternalStrideB>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{}));
// It is assumed that the scales and zero-points share the same smem layout
using SmemLayoutScale = decltype(tile_to_shape(
SmemLayoutAtomScale{},
make_shape(shape<0>(ScaleTileShape{}), shape<1>(ScaleTileShape{}), Int<Stages>{}),
cute::conditional_t< ::cutlass::gemm::detail::is_major<0,NonVoidStrideScale>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{}));
static_assert(DispatchPolicy::Stages >= 2, "Specialization requires Stages set to value 2 or more.");
static_assert(not cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeA>::value &&
cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeB>::value,
"MMA atom must source A from rmem and B operand from smem_desc for this mainloop.");
static_assert(cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD> || cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_MULTICAST>,
"GmemTiledCopy - invalid SM90 TMA copy atom specified.");
static_assert(cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD> || cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_MULTICAST>,
"GmemTiledCopy - invalid SM90 TMA copy atom specified.");
// To relax this restriction, we would need to handle loading more than one row of scales for every mainloop iteration.
// We must also handle updating the pipeline transaction bytes on the fly.
// NOTE: Deleting this assertion without required changes will cause the code to hang.
static_assert(size<1>(SmemLayoutAtomScale{}) == 1, "size<1>(SmemLayoutAtomScale) must be 1.");
private:
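// Deduce the conversion mode from which optional element types are void:
//   ElementScale void  -> DirectConvert            (numeric conversion only, no scaling)
//   ElementZero  void  -> ConvertAndScale          (convert, then multiply by scales)
//   neither void       -> ConvertAndScaleWithZero  (convert, scale, and add zero-points)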
static constexpr ConversionMode
get_conversion_mode() {
if constexpr (cute::is_void_v<ElementScale>) {
return ConversionMode::DirectConvert;
}
else if constexpr (cute::is_void_v<ElementZero>) {
return ConversionMode::ConvertAndScale;
}
else {
return ConversionMode::ConvertAndScaleWithZero;
}
}
static constexpr ConversionMode KernelConversionMode = get_conversion_mode();
static constexpr bool ModeHasScales = KernelConversionMode == ConversionMode::ConvertAndScale ||
KernelConversionMode == ConversionMode::ConvertAndScaleWithZero;
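// Size the optional shared-memory buffers: these helpers return the number of scale / zero-point
// elements that must be staged in smem (across all pipeline stages), or 0 when the active
// conversion mode does not use them.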
static constexpr auto
elements_per_smem_scale() {
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
return 0;
}
else if constexpr (ModeHasScales) {
return cute::cosize_v<SmemLayoutScale>;
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Type not handled in scale smem allocation.");
}
}
static constexpr auto
elements_per_smem_zero() {
if constexpr (KernelConversionMode == ConversionMode::DirectConvert ||
KernelConversionMode == ConversionMode::ConvertAndScale ) {
return 0;
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
return cute::cosize_v<SmemLayoutScale>;
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Type not handled in scale smem allocation.");
}
}
// These methods use some of the public members of the class. For that reason, we define them after the public section.
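// compute_tma_transaction_bytes_mk() accounts for the A tile plus, depending on the conversion mode,
// the scale and zero-point tiles for one pipeline stage; compute_tma_transaction_bytes_nk() accounts
// for the B tile. The pipeline uses these byte counts to track TMA barrier arrival.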
static constexpr uint32_t
compute_tma_transaction_bytes_mk() {
constexpr uint32_t baseline_bytes = cutlass::bits_to_bytes(size<0>(SmemLayoutA{}) * size<1>(SmemLayoutA{}) * static_cast<uint32_t>(cute::sizeof_bits_v<InternalElementA>));
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
return baseline_bytes;
}
else if constexpr (ModeHasScales) {
constexpr uint32_t scale_tx_bytes = cutlass::bits_to_bytes(size<0>(SmemLayoutScale{}) * size<1>(SmemLayoutScale{}) * static_cast<uint32_t>(cute::sizeof_bits_v<ElementScale>));
static_assert(scale_tx_bytes % 128 == 0, "Each scale stage must be 128B aligned."); // required by TMA
if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
return baseline_bytes + scale_tx_bytes;
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
// Scale and zero share smem layout
constexpr uint32_t zero_tx_bytes = cutlass::bits_to_bytes(size<0>(SmemLayoutScale{}) * size<1>(SmemLayoutScale{}) * static_cast<uint32_t>(cute::sizeof_bits_v<ElementZero>));
static_assert(zero_tx_bytes % 128 == 0, "Each zero stage must be 128B aligned."); // required by TMA
return baseline_bytes + scale_tx_bytes + zero_tx_bytes;
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Type not handled in tma transaction bytes computation.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Type not handled in tma transaction bytes computation.");
}
}
static constexpr uint32_t
compute_tma_transaction_bytes_nk() {
return cutlass::bits_to_bytes(size<0>(SmemLayoutB{}) * size<1>(SmemLayoutB{}) * static_cast<uint32_t>(cute::sizeof_bits_v<InternalElementB>));
}
public:
static constexpr size_t SmemAlignmentA = cutlass::detail::alignment_for_swizzle(SmemLayoutA{});
static constexpr size_t SmemAlignmentB = cutlass::detail::alignment_for_swizzle(SmemLayoutB{});
// Just pick the max alignment of A and B since it is required to be at least 128B
static constexpr size_t SmemAlignmentScale = cute::max(SmemAlignmentA, SmemAlignmentB);
static_assert(SmemAlignmentA >= 128 and SmemAlignmentB >= 128, "Require at least 128B alignment");
struct SharedStorage
{
static constexpr int scale_elements = elements_per_smem_scale();
static constexpr int zero_elements = elements_per_smem_zero();
struct TensorStorage : cute::aligned_struct<cute::max(SmemAlignmentA, SmemAlignmentB)> {
cute::ArrayEngine<RealInternalElementA, cute::cosize_v<SmemLayoutA>> smem_A;
cute::ArrayEngine<typename TiledMma::ValTypeB, cute::cosize_v<SmemLayoutB>> smem_B;
cute::ArrayEngine<NonVoidElementScale, scale_elements> smem_scale;
cute::ArrayEngine<NonVoidElementZero, zero_elements> smem_zero;
} tensors;
using PipelineStorage = typename MainloopPipeline::SharedStorage;
PipelineStorage pipeline;
};
using TensorStorage = typename SharedStorage::TensorStorage;
using PipelineStorage = typename SharedStorage::PipelineStorage;
// Host side kernel arguments
struct Arguments {
ElementA const* ptr_A = nullptr;
StrideA dA{};
ElementB const* ptr_B = nullptr;
StrideB dB{};
ElementScale const* ptr_S = nullptr;
NonVoidStrideScale dS{};
int group_size = 0;
ElementZero const* ptr_Z = nullptr;
uint32_t mma_promotion_interval = 4;
};
// Device side kernel params
struct Params {
private:
using Outer = CollectiveMma<DispatchPolicy, TileShape_,
ElementAOptionalTuple, StrideA_,
ElementBOptionalTuple, StrideB_,
TiledMma_,
GmemTiledCopyA_, SmemLayoutAtomA_, SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_, SmemLayoutAtomB_, SmemCopyAtomB_,
TransformB_>;
public:
// Assumption: StrideA is congruent with Problem_MK
using TMA_A = decltype(make_tma_copy<TmaElementA>(
GmemTiledCopyA{},
make_tensor(Outer::get_logical_ptr(static_cast<InternalElementA const*>(nullptr)), repeat_like(InternalStrideA{}, int32_t(0)), InternalStrideA{}),
SmemLayoutA{}(_,_,cute::Int<0>{}),
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{})),
size<1>(ClusterShape{}))); // mcast along N mode for this M load, if any
using TMA_Scale = decltype(make_tma_copy(
GmemTiledCopyScale{},
make_tensor(Outer::get_logical_ptr(static_cast<NonVoidElementScale const*>(nullptr)), repeat_like(NonVoidStrideScale{}, int32_t(0)), NonVoidStrideScale{}),
SmemLayoutScale{}(_,_,cute::Int<0>{}),
ScaleTileShape{},
_1{})); // mcast along N mode for this M load, if any. Scale is ALWAYS loaded with A for RF kernel
using TMA_Zero = decltype(make_tma_copy(
GmemTiledCopyScale{},
make_tensor(Outer::get_logical_ptr(static_cast<NonVoidElementZero const*>(nullptr)), repeat_like(NonVoidStrideScale{}, int32_t(0)), NonVoidStrideScale{}),
SmemLayoutScale{}(_,_,cute::Int<0>{}),
ScaleTileShape{},
_1{})); // mcast along N mode for this M load, if any. Scale is ALWAYS loaded with A for RF kernel
// Assumption: StrideB is congruent with Problem_NK
using TMA_B = decltype(make_tma_copy(
GmemTiledCopyB{},
make_tensor(Outer::get_logical_ptr(static_cast<InternalElementB const*>(nullptr)), repeat_like(InternalStrideB{}, int32_t(0)), InternalStrideB{}),
SmemLayoutB{}(_,_,cute::Int<0>{}),
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{})),
size<0>(ClusterShape{}))); // mcast along M mode for this N load, if any
TMA_A tma_load_a;
TMA_B tma_load_b;
TMA_Scale tma_load_scale;
TMA_Zero tma_load_zero;
int64_t scale_k;
int group_size;
uint32_t tma_transaction_bytes = TmaTransactionBytes;
uint32_t tma_transaction_bytes_mk = TmaTransactionBytesMK;
uint32_t tma_transaction_bytes_nk = TmaTransactionBytesNK;
};
//
// Methods
//
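/// Map host-side Arguments to device-side Params: applies the A/B swap (if enabled) and builds the
/// TMA descriptors for A, B and, depending on the conversion mode, the scale and zero-point tensors.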
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
(void) workspace;
// Optionally append 1s until problem shape is rank-4 (MNKL), in case it is only rank-3 (MNK)
auto problem_shape_MNKL = append<4>(problem_shape, 1);
auto [M,N,K,L] = problem_shape_MNKL;
if constexpr (SwapAB) {
M = get<1>(problem_shape_MNKL);
N = get<0>(problem_shape_MNKL);
}
InternalElementA const* ptr_A;
InternalStrideA dA;
InternalElementB const* ptr_B;
InternalStrideB dB;
if constexpr (not SwapAB) {
ptr_A = reinterpret_cast<InternalElementA const*>(args.ptr_A);
ptr_B = reinterpret_cast<InternalElementB const*>(args.ptr_B);
dA = args.dA;
dB = args.dB;
}
else {
ptr_A = reinterpret_cast<InternalElementA const*>(args.ptr_B);
ptr_B = reinterpret_cast<InternalElementB const*>(args.ptr_A);
dA = args.dB;
dB = args.dA;
}
Tensor tensor_a = make_tensor(get_logical_ptr(ptr_A), make_layout(make_shape(M,K,L), dA));
Tensor tensor_b = make_tensor(get_logical_ptr(ptr_B), make_layout(make_shape(N,K,L), dB));
typename Params::TMA_A tma_load_a = make_tma_copy<TmaElementA>(
GmemTiledCopyA{},
tensor_a,
SmemLayoutA{}(_,_,cute::Int<0>{}),
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{})),
size<1>(ClusterShape{})); // mcast along N mode for this M load, if any
typename Params::TMA_B tma_load_b = make_tma_copy(
GmemTiledCopyB{},
tensor_b,
SmemLayoutB{}(_,_,cute::Int<0>{}),
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{})),
size<0>(ClusterShape{})); // mcast along M mode for this N load, if any
typename Params::TMA_Scale tma_load_scale;
typename Params::TMA_Zero tma_load_zero;
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
return { tma_load_a, tma_load_b, tma_load_scale, tma_load_zero, 0, 0, TmaTransactionBytes, TmaTransactionBytesMK, TmaTransactionBytesNK };
}
else if constexpr (ModeHasScales) {
auto scale_k = (K + args.group_size - 1) / args.group_size;
ElementScale const* ptr_S = args.ptr_S;
StrideScale dS = args.dS;
Tensor tensor_scale = make_tensor(get_logical_ptr(ptr_S), make_layout(make_shape(M,scale_k,L), dS));
tma_load_scale = make_tma_copy(
GmemTiledCopyScale{},
tensor_scale,
SmemLayoutScale{}(_,_,cute::Int<0>{}),
ScaleTileShape{},
_1{}); // mcast along N mode for this M load, if any
if constexpr(KernelConversionMode == ConversionMode::ConvertAndScale) {
return { tma_load_a, tma_load_b, tma_load_scale, tma_load_zero, scale_k, args.group_size, TmaTransactionBytes, TmaTransactionBytesMK, TmaTransactionBytesNK };
}
else if constexpr(KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
Tensor tensor_zero = make_tensor(get_logical_ptr(args.ptr_Z), make_layout(make_shape(M,scale_k,L), dS));
tma_load_zero = make_tma_copy(
GmemTiledCopyScale{},
tensor_zero,
SmemLayoutScale{}(_,_,cute::Int<0>{}),
ScaleTileShape{},
_1{}); // mcast along N mode for this M load, if any
return { tma_load_a, tma_load_b, tma_load_scale, tma_load_zero, scale_k, args.group_size, TmaTransactionBytes, TmaTransactionBytesMK, TmaTransactionBytesNK };
} else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in to_underlying_arguments.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in to_underlying_arguments.");
}
}
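/// Checks TMA alignment requirements for A, B (and scale/zero when present), that group_size is
/// nonzero and either equals K or is a multiple of the threadblock tile K, and that the scale/zero
/// pointers are consistent with the selected conversion mode.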
template<class ProblemShape>
static bool
can_implement(
ProblemShape const& problem_shape,
[[maybe_unused]] Arguments const& args) {
constexpr int tma_alignment_bits = 128;
auto problem_shape_MNKL = append<4>(problem_shape, 1);
auto [M,N,K,L] = problem_shape_MNKL;
bool implementable = true;
constexpr int min_tma_aligned_elements_A = tma_alignment_bits / cutlass::sizeof_bits<ElementA>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_A>(cute::make_shape(M,K,L), StrideA{});
constexpr int min_tma_aligned_elements_B = tma_alignment_bits / cutlass::sizeof_bits<ElementB>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_B>(cute::make_shape(N,K,L), StrideB{});
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
implementable = implementable && (args.ptr_S == nullptr);
implementable = implementable && (args.ptr_Z == nullptr);
}
else if constexpr (ModeHasScales) {
const int scale_mn = SwapAB ? N : M;
const int scale_k = (K + args.group_size - 1) / args.group_size;
constexpr int min_tma_aligned_elements_scale = tma_alignment_bits / cutlass::sizeof_bits<ElementScale>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_scale>(cute::make_shape(scale_mn,scale_k,L), StrideScale{});
implementable = implementable && (args.group_size == K || ((args.group_size % size<2>(TileShape{})) == 0));
implementable = implementable && args.group_size != 0;
implementable = implementable && (args.ptr_S != nullptr);
if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
implementable = implementable && (args.ptr_Z == nullptr);
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
constexpr int min_tma_aligned_elements_zero = tma_alignment_bits / cutlass::sizeof_bits<ElementZero>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_zero>(cute::make_shape(scale_mn,scale_k,L), StrideScale{});
implementable = implementable && (args.ptr_Z != nullptr);
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in can_implement.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in can_implement.");
}
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Problem Size doesn't meet the minimum alignment requirements for TMA.\n");
}
return implementable;
}
static constexpr int K_PIPE_MAX = DispatchPolicy::Stages;
static constexpr uint32_t TmaTransactionBytesMK = compute_tma_transaction_bytes_mk();
static constexpr uint32_t TmaTransactionBytesNK = compute_tma_transaction_bytes_nk();
static constexpr uint32_t TmaTransactionBytes = TmaTransactionBytesMK + TmaTransactionBytesNK;
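// TmaTransactionBytesMK covers the A tile (plus scale/zero tiles when present) for one pipeline stage,
// TmaTransactionBytesNK covers the B tile; their sum is the byte count the producer expects the TMA
// barrier to observe per stage.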
/// Issue Tma Descriptor Prefetch -- ideally from a single thread for best performance
CUTLASS_DEVICE
static void prefetch_tma_descriptors(Params const& mainloop_params) {
cute::prefetch_tma_descriptor(mainloop_params.tma_load_a.get_tma_descriptor());
cute::prefetch_tma_descriptor(mainloop_params.tma_load_b.get_tma_descriptor());
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
// Nothing extra to do
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
cute::prefetch_tma_descriptor(mainloop_params.tma_load_scale.get_tma_descriptor());
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
cute::prefetch_tma_descriptor(mainloop_params.tma_load_scale.get_tma_descriptor());
cute::prefetch_tma_descriptor(mainloop_params.tma_load_zero.get_tma_descriptor());
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in TMA prefetch.");
}
}
/// Set up the data needed by this collective for load and mma.
/// Returns a tuple of tensors. The collective and the kernel layer have the contract that the
/// returned tuple must contain at least two elements, with the first two elements being:
/// gA_mkl - The tma tensor, A after a local tile so it has shape (BLK_M,BLK_K,m,k,l)
/// gB_nkl - The tma tensor, B after a local tile so it has shape (BLK_N,BLK_K,n,k,l)
/// The rest of the tensors can be specified as needed by this collective.
template <class ProblemShape_MNKL>
CUTLASS_DEVICE auto
load_init(ProblemShape_MNKL const& problem_shape_MNKL, Params const& mainloop_params) const {
using X = Underscore;
// Separate out problem shape for convenience
auto [M,N,K,L] = problem_shape_MNKL;
// TMA requires special handling of strides to deal with coord codomain mapping
// Represent the full tensors -- get these from TMA
Tensor mA_mkl = mainloop_params.tma_load_a.get_tma_tensor(make_shape(M,K,L)); // (m,k,l)
Tensor mB_nkl = mainloop_params.tma_load_b.get_tma_tensor(make_shape(N,K,L)); // (n,k,l)
// Make tiled views, defer the slice
Tensor gA_mkl = local_tile(mA_mkl, TileShape{}, make_coord(_,_,_), Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k,l)
Tensor gB_nkl = local_tile(mB_nkl, TileShape{}, make_coord(_,_,_), Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k,l)
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
return cute::make_tuple(gA_mkl, gB_nkl);
}
else if constexpr (ModeHasScales) {
auto scale_k = mainloop_params.scale_k;
Tensor mS_mkl = mainloop_params.tma_load_scale.get_tma_tensor(make_shape(M,scale_k,L)); // (m,scale_k,l)
Tensor gS_mkl = local_tile(mS_mkl, ScaleTileShape{}, make_coord(_,_)); // (BLK_M,BLK_Scale_K,m,scale_k,l)
if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
return cute::make_tuple(gA_mkl, gB_nkl, gS_mkl);
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
Tensor mZ_mkl = mainloop_params.tma_load_zero.get_tma_tensor(make_shape(M,scale_k,L)); // (m,scale_k,l)
Tensor gZ_mkl = local_tile(mZ_mkl, ScaleTileShape{}, make_coord(_,_)); // (BLK_M,BLK_Scale_K,m,scale_k,l)
return cute::make_tuple(gA_mkl, gB_nkl, gS_mkl, gZ_mkl);
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in load_init.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in load_init.");
}
}
/// Perform a collective-scoped matrix multiply-accumulate
/// Producer Perspective
/// This collective handles all conversion modes; the number of tensors in load_inputs depends on the mode.
template <
class... Ts,
class KTileIterator, class BlockCoord
>
CUTLASS_DEVICE void
load(
Params const& mainloop_params,
MainloopPipeline pipeline,
PipelineState smem_pipe_write,
cute::tuple<Ts...> const& load_inputs,
BlockCoord const& blk_coord,
KTileIterator k_tile_iter, int k_tile_count,
int thread_idx,
uint32_t block_rank_in_cluster,
TensorStorage& shared_tensors) {
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
static_assert(sizeof... (Ts) == 2, "Direct convert needs two inputs");
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
static_assert(sizeof... (Ts) == 3, "Scaled convert needs three inputs");
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
static_assert(sizeof... (Ts) == 4, "Scaled and zero convert needs four inputs");
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in TMA load.");
}
int lane_predicate = cute::elect_one_sync();
if (lane_predicate) {
Tensor sA_ = make_tensor(make_smem_ptr(shared_tensors.smem_A.begin()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sB_ = make_tensor(make_smem_ptr(shared_tensors.smem_B.begin()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
Tensor sA = as_position_independent_swizzle_tensor(sA_); // (BLK_M,BLK_K,PIPE)
Tensor sB = as_position_independent_swizzle_tensor(sB_); // (BLK_N,BLK_K,PIPE)
//
// Prepare the TMA loads for A, B and Scales
//
constexpr uint32_t cluster_shape_x = get<0>(ClusterShape());
uint2 cluster_local_block_id = {block_rank_in_cluster % cluster_shape_x, block_rank_in_cluster / cluster_shape_x};
Tensor gA_mkl = get<0>(load_inputs);
Tensor gB_nkl = get<1>(load_inputs);
auto block_tma_a = mainloop_params.tma_load_a.get_slice(cluster_local_block_id.y);
auto block_tma_b = mainloop_params.tma_load_b.get_slice(cluster_local_block_id.x);
// Partition the inputs based on the current block coordinates.
auto [m_coord, n_coord, k_coord, l_coord] = blk_coord;
Tensor gA = gA_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k)
Tensor gB = gB_nkl(_,_,n_coord,_,l_coord); // (BLK_N,BLK_K,k)
// Applies the mapping from block_tma_a
Tensor tAgA = block_tma_a.partition_S(gA); // (TMA,TMA_M,TMA_K,k)
Tensor tAsA = block_tma_a.partition_D(sA); // (TMA,TMA_M,TMA_K,PIPE)
Tensor tBgB = block_tma_b.partition_S(gB); // (TMA,TMA_N,TMA_K,k)
Tensor tBsB = block_tma_b.partition_D(sB); // (TMA,TMA_N,TMA_K,PIPE)
uint16_t mcast_mask_a = 0;
uint16_t mcast_mask_b = 0;
uint16_t mcast_mask_s = 0;
// Issue TmaLoads
// Maps the tile -> block, value
if constexpr (cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_MULTICAST>) {
auto block_layout = Layout<typename DispatchPolicy::ClusterShape>{}; // (m,n) -> block_id
for (int n = 0; n < size<1>(block_layout); ++n) {
mcast_mask_a |= (uint16_t(1) << block_layout(cluster_local_block_id.x,n,Int<0>{}));
}
}
if constexpr (cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_MULTICAST>) {
auto block_layout = Layout<typename DispatchPolicy::ClusterShape>{}; // (m,n) -> block_id
for (int m = 0; m < size<0>(block_layout); ++m) {
mcast_mask_b |= (uint16_t(1) << block_layout(m,cluster_local_block_id.y,Int<0>{}));
}
}
auto extra_input_partitions = partition_extra_tma_inputs(mainloop_params, load_inputs, shared_tensors, cluster_local_block_id, m_coord, l_coord);
// Mainloop
CUTLASS_PRAGMA_NO_UNROLL
for ( ; k_tile_count > 0; --k_tile_count) {
// LOCK smem_pipe_write for _writing_
pipeline.producer_acquire(smem_pipe_write);
//
// Copy gmem to smem for *k_tile_iter
//
using BarrierType = typename MainloopPipeline::ProducerBarrierType;
BarrierType* tma_barrier = pipeline.producer_get_barrier(smem_pipe_write);
int write_stage = smem_pipe_write.index();
copy(mainloop_params.tma_load_a.with(*tma_barrier, mcast_mask_a), tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,write_stage));
copy(mainloop_params.tma_load_b.with(*tma_barrier, mcast_mask_b), tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,write_stage));
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
// Nothing extra to do.
}
else if constexpr (ModeHasScales) {
auto tSgS = get<0>(extra_input_partitions);
auto tSsS = get<1>(extra_input_partitions);
// Temporary factor which will determine which k tile to reload from gmem. Needed so we don't modify tma transaction bytes
// on the fly.
// We must do a ceiling divide here to correctly handle the case where group_size == K. In that case, we do not
// require that K is a multiple of the threadblock tile K.
const int ReloadFactor = (mainloop_params.group_size + size<2>(TileShape{}) - 1) / size<2>(TileShape{});
const int scale_load_k = *k_tile_iter / ReloadFactor; // This will always be 0 when group_size == K.
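// For example (illustrative numbers only): with size<2>(TileShape{}) == 64 and group_size == 128,
// ReloadFactor == 2, so the same scale row is re-issued for two consecutive k tiles before
// scale_load_k advances.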
copy(mainloop_params.tma_load_scale.with(*tma_barrier, mcast_mask_s), tSgS(_,_,_,scale_load_k), tSsS(_,_,_,write_stage));
if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
// Nothing extra to do
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
auto tZgZ = get<2>(extra_input_partitions);
auto tZsZ = get<3>(extra_input_partitions);
copy(mainloop_params.tma_load_zero.with(*tma_barrier, mcast_mask_s), tZgZ(_,_,_,scale_load_k), tZsZ(_,_,_,write_stage));
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled for TMA copy op.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled for TMA copy op.");
}
++k_tile_iter;
// Advance smem_pipe_write
++smem_pipe_write;
}
}
}
/// Perform a Producer Epilogue to prevent early exit of blocks in a Cluster
CUTLASS_DEVICE void
load_tail(MainloopPipeline pipeline, PipelineState smem_pipe_write) {
int lane_predicate = cute::elect_one_sync();
// Issue the epilogue waits
if (lane_predicate) {
/* This helps avoid early exit of blocks in Cluster
* Waits for all stages to either be released (all
* Consumer UNLOCKs), or if the stage was never used
* then would just be acquired since the phase was
* still inverted from make_producer_start_state
*/
pipeline.producer_tail(smem_pipe_write);
}
}
/// Perform a collective-scoped matrix multiply-accumulate
/// Consumer Perspective
template <
class FrgTensorC
>
CUTLASS_DEVICE void
mma(MainloopPipeline pipeline,
PipelineState smem_pipe_read,
FrgTensorC& accum,
int k_tile_count,
int thread_idx,
TensorStorage& shared_tensors,
Params const& mainloop_params) {
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
static_assert(cute::rank(SmemLayoutA{}) == 3, "Smem layout must be rank 3.");
static_assert(cute::rank(SmemLayoutB{}) == 3, "Smem layout must be rank 3.");
static_assert(cute::rank(InternalSmemLayoutAtomA{}) == 2, "InternalSmemLayoutAtomA must be rank 2.");
static_assert(cute::rank(InternalSmemLayoutAtomB{}) == 2, "InternalSmemLayoutAtomB must be rank 2.");
static_assert(!cute::is_void_v<InternalSmemCopyAtomA>,
"SM90 GMMA mainloops must specify a non-void copy atom for RF sourced instructions.");
static_assert(cute::is_void_v<InternalSmemCopyAtomB>,
"SM90 GMMA mainloops cannot have a non-void copy atom for smem sourced instructions.");
// Obtain warp index
int warp_idx = canonical_warp_idx_sync();
[[maybe_unused]] int warp_group_thread_idx = thread_idx % 128;
Tensor sA_ = make_tensor(make_smem_ptr(shared_tensors.smem_A.begin()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sA = as_position_independent_swizzle_tensor(sA_); // (BLK_M,BLK_K,PIPE)
Tensor sB = make_tensor(make_smem_ptr(shared_tensors.smem_B.begin()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
//
// Define C accumulators and A/B partitioning
//
// Layout of warp group to thread mapping
static_assert(stride<0>(typename TiledMma::BLayout{}) == 0 and
size<0>(typename TiledMma::BLayout{}) == NumThreadsPerWarpGroup,
"Stride of the first mode must be 0 and the size of the mode must be NumThreadsPerWarpGroup");
constexpr int MmaWarpGroups = size(TiledMma{}) / NumThreadsPerWarpGroup;
Layout warp_group_thread_layout = make_layout(Int<MmaWarpGroups>{},
Int<NumThreadsPerWarpGroup>{});
int warp_group_idx = __shfl_sync(0xFFFFFFFF, thread_idx / NumThreadsPerWarpGroup, 0);
TiledMma tiled_mma;
auto mma_thread_slice = tiled_mma.get_thread_slice(thread_idx);
Tensor tCsA = mma_thread_slice.partition_A(sA);
auto mma_warpgroup_slice = tiled_mma.get_slice(warp_group_thread_layout(warp_group_idx));
// Allocate fragments and descriptors
Tensor tCrA_mma = mma_thread_slice.partition_fragment_A(sA(_,_,Int<0>{})); // (MMA,MMA_M,MMA_K,PIPE)
Tensor tCrA_load = make_fragment_like<RealInternalElementA>(tCrA_mma);
Tensor tCsB = mma_warpgroup_slice.partition_B(sB); // (MMA,MMA_N,MMA_K,PIPE)
Tensor tCrB = mma_warpgroup_slice.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K,PIPE)
//
// Copy Atom A retiling
//
auto smem_tiled_copy_A = make_tiled_copy_A(InternalSmemCopyAtomA{}, tiled_mma);
auto smem_thr_copy_A = smem_tiled_copy_A.get_thread_slice(warp_group_thread_idx);
Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA_load); // (CPY,CPY_M,CPY_K)
// Compute the max vector length that can be used to copy A. This will match the vector width of the
// conversions used. It helps by allowing the compiler to convert using the same register that was used
// to load the data from smem. This significantly reduces the need to move data among registers.
// Note that this is correct even if the copy fails to vectorize, since the granularity at which we perform
// the conversion does not impact correctness.
using A_CPY_VEC = decltype(max_common_vector(tCsA, tCrA_copy_view));
// Partition of thread -> shared and thread -> RF
auto partitioned_extra_info = partition_extra_mma_info(mma_thread_slice, shared_tensors);
auto copy_partitions_extra_info = retile_extra_mma_info(tiled_mma, partitioned_extra_info, warp_group_thread_idx);
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // CPY_M
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCrA_copy_view)); // CPY_K
CUTE_STATIC_ASSERT_V(size<1>(tCrA_mma) == size<1>(accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<2>(accum)); // N
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // K
CUTE_STATIC_ASSERT_V(size<3>(tCsA) == size<3>(tCsB)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE
//
// PIPELINED MAIN LOOP
//
// We release buffers to producer warps (dma load) with some mmas in flight
PipelineState smem_pipe_release = smem_pipe_read;
tiled_mma.accumulate_ = GMMA::ScaleOut::Zero;
warpgroup_fence_operand(accum);
constexpr int K_BLOCK_MAX = size<2>(tCrA_load);
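// Software pipelining: A (and its scales/zeros) are copied from smem and converted one k_block ahead
// of the GMMA that consumes them, so dequantization latency overlaps with tensor core math.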
ConsumerToken barrier_token = {BarrierStatus::WaitAgain};
// first k tile
{
barrier_token = pipeline.consumer_try_wait(smem_pipe_read);
pipeline.consumer_wait(smem_pipe_read, barrier_token);
int read_stage = smem_pipe_read.index();
++smem_pipe_read;
barrier_token = pipeline.consumer_try_wait(smem_pipe_read);
// copy smem->rmem for A operand
copy_A_and_extra_info(smem_tiled_copy_A, tCsA, tCrA_copy_view,
partitioned_extra_info, copy_partitions_extra_info, 0, read_stage);
transform_A_kblock(tCrA_load, A_CPY_VEC{}, tCrA_mma, partitioned_extra_info, 0);
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < K_BLOCK_MAX; ++k_block) {
if (k_block < K_BLOCK_MAX - 1) {
copy_A_and_extra_info(smem_tiled_copy_A, tCsA, tCrA_copy_view,
partitioned_extra_info, copy_partitions_extra_info, k_block + 1, read_stage);
transform_A_kblock(tCrA_load, A_CPY_VEC{}, tCrA_mma, partitioned_extra_info, k_block + 1);
}
warpgroup_arrive();
// (V,M) x (V,N) => (V,M,N)
cute::gemm(tiled_mma, tCrA_mma(_,_,k_block), tCrB(_,_,k_block,read_stage), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
warpgroup_commit_batch();
}
--k_tile_count;
if (k_tile_count > 0) {
// Wait for K_BLOCK_MAX - 1 to be in flight to ensure that it is safe to overwrite the A registers for the first mma.
warpgroup_wait<K_BLOCK_MAX - 1>();
pipeline.consumer_wait(smem_pipe_read, barrier_token);
copy_A_and_extra_info(smem_tiled_copy_A, tCsA, tCrA_copy_view,
partitioned_extra_info, copy_partitions_extra_info, 0, smem_pipe_read.index());
transform_A_kblock(tCrA_load, A_CPY_VEC{}, tCrA_mma, partitioned_extra_info, 0);
}
}
if (k_tile_count == 0) {
return;
}
warpgroup_fence_operand(accum);
// Mainloop GMMAs
CUTLASS_PRAGMA_NO_UNROLL
for ( ; k_tile_count > 1; --k_tile_count) {
//
// Compute on k_tile
//
int read_stage = smem_pipe_read.index();
++smem_pipe_read;
warpgroup_fence_operand(accum);
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < K_BLOCK_MAX; ++k_block) {
warpgroup_arrive();
// (V,M) x (V,N) => (V,M,N)
cute::gemm(tiled_mma, tCrA_mma(_,_,k_block), tCrB(_,_,k_block,read_stage), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
warpgroup_commit_batch();
warpgroup_wait<K_BLOCK_MAX - 1>();
if (k_block == K_BLOCK_MAX - 1) {
// We have K_BLOCK_MAX - 1 GMMA instructions pending for this stage, so we can release prior barrier
pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it
++smem_pipe_release;
}
if (k_block == 0) {
barrier_token = pipeline.consumer_try_wait(smem_pipe_read);
}
if (k_block == K_BLOCK_MAX - 1) {
pipeline.consumer_wait(smem_pipe_read, barrier_token);
copy_A_and_extra_info(smem_tiled_copy_A, tCsA, tCrA_copy_view,
partitioned_extra_info, copy_partitions_extra_info, 0, smem_pipe_read.index());
transform_A_kblock(tCrA_load, A_CPY_VEC{}, tCrA_mma, partitioned_extra_info, 0);
}
else {
copy_A_and_extra_info(smem_tiled_copy_A, tCsA, tCrA_copy_view,
partitioned_extra_info, copy_partitions_extra_info, k_block + 1, read_stage);
transform_A_kblock(tCrA_load, A_CPY_VEC{}, tCrA_mma, partitioned_extra_info, k_block + 1);
}
}
warpgroup_fence_operand(accum);
}
warpgroup_fence_operand(accum);
{
//
// Compute on k_tile
//
int read_stage = smem_pipe_read.index();
warpgroup_fence_operand(accum);
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < K_BLOCK_MAX; ++k_block) {
warpgroup_arrive();
// (V,M) x (V,N) => (V,M,N)
cute::gemm(tiled_mma, tCrA_mma(_,_,k_block), tCrB(_,_,k_block,read_stage), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
warpgroup_commit_batch();
warpgroup_wait<K_BLOCK_MAX - 1>();
if (k_block == K_BLOCK_MAX - 1) {
// release prior barrier
pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it
++smem_pipe_release;
}
if (k_block < K_BLOCK_MAX - 1) {
copy_A_and_extra_info(smem_tiled_copy_A, tCsA, tCrA_copy_view,
partitioned_extra_info, copy_partitions_extra_info, k_block + 1, read_stage);
transform_A_kblock(tCrA_load, A_CPY_VEC{}, tCrA_mma, partitioned_extra_info, k_block + 1);
}
}
}
warpgroup_fence_operand(accum);
}
/// Perform a Consumer Epilogue to release all buffers
CUTLASS_DEVICE void
mma_tail(MainloopPipeline pipeline, PipelineState smem_pipe_release, int k_tile_count) {
// Prologue GMMAs
int prologue_mma_count = 1;
k_tile_count -= prologue_mma_count;
smem_pipe_release.advance(k_tile_count);
// Wait on all GMMAs to complete
warpgroup_wait<0>();
for (int count = 0; count < prologue_mma_count; ++count) {
pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it
++smem_pipe_release;
}
}
private:
/// Utilities for any additional inputs inside of the TMA load
template <class... Ts>
CUTLASS_DEVICE
auto partition_extra_tma_inputs(
Params const& mainloop_params,
cute::tuple<Ts...> const& load_inputs,
TensorStorage& shared_tensors,
uint2 const& cluster_local_block_id,
int const m_coord,
int const l_coord) {
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
return cute::make_tuple();
}
else if constexpr (ModeHasScales) {
Tensor sS = make_tensor(make_smem_ptr(shared_tensors.smem_scale.begin()), SmemLayoutScale{}); // (BLK_M,BLK_K,PIPE)
Tensor gS_mkl = get<2>(load_inputs);
auto block_tma_s = mainloop_params.tma_load_scale.get_slice(cluster_local_block_id.y);
Tensor gS = gS_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k)
Tensor tSgS = block_tma_s.partition_S(gS); // (TMA,TMA_M,TMA_K,k)
Tensor tSsS = block_tma_s.partition_D(sS); // (TMA,TMA_M,TMA_K,PIPE)
if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
return cute::make_tuple(tSgS, tSsS);
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
Tensor sZ = make_tensor(make_smem_ptr(shared_tensors.smem_zero.begin()), SmemLayoutScale{}); // (BLK_M,BLK_K,PIPE)
Tensor gZ_mkl = get<3>(load_inputs);
auto block_tma_z = mainloop_params.tma_load_zero.get_slice(cluster_local_block_id.y);
Tensor gZ = gZ_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k)
Tensor tZgZ = block_tma_z.partition_S(gZ); // (TMA,TMA_M,TMA_K,k)
Tensor tZsZ = block_tma_z.partition_D(sZ); // (TMA,TMA_M,TMA_K,PIPE)
return cute::make_tuple(tSgS, tSsS, tZgZ, tZsZ);
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled for input partitioning.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled for input partitioning.");
}
}
/// Utilities for partitioning extra inputs for loading from smem in the mainloop.
template <class ThreadMma>
CUTLASS_DEVICE
auto partition_extra_mma_info(
ThreadMma const& mma_thread_slice,
TensorStorage& shared_tensors) {
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
// nothing to do
return cute::make_tuple();
}
else if constexpr (ModeHasScales) {
Tensor sS = make_tensor(make_smem_ptr(shared_tensors.smem_scale.begin()), SmemLayoutScale{});// (BLK_M,BLK_SCALE_K,PIPE)
Tensor tCsS = mma_thread_slice.partition_A(sS);
Tensor tCrS = make_tensor<ElementScale>(mma_thread_slice.partition_fragment_A(sS(_,_,Int<0>{})).shape());
if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
return cute::make_tuple(tCsS, tCrS);
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
Tensor sZ = make_tensor(make_smem_ptr(shared_tensors.smem_zero.begin()), SmemLayoutScale{});// (BLK_M,BLK_SCALE_K,PIPE)
Tensor tCsZ = mma_thread_slice.partition_A(sZ);
Tensor tCrZ = make_tensor<ElementZero>(mma_thread_slice.partition_fragment_A(sZ(_,_,Int<0>{})).shape());
return cute::make_tuple(tCsS, tCrS, tCsZ, tCrZ);
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in A -> RF path.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in A -> RF path.");
}
}
/// Returns the tiled copy and copy views for the extra inputs.
template <class TiledMma, class... Ts>
CUTLASS_DEVICE
auto retile_extra_mma_info(
TiledMma const& tiled_mma,
cute::tuple<Ts...>& partitioned_extra_info,
int const warp_group_thread_idx) {
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
// nothing to do
return cute::make_tuple();
}
else if constexpr (ModeHasScales) {
auto smem_tiled_copy_S = make_tiled_copy_A(SmemCopyAtomScale{}, tiled_mma);
auto smem_thr_copy_S = smem_tiled_copy_S.get_thread_slice(warp_group_thread_idx);
Tensor tCrS_copy_view = smem_thr_copy_S.retile_D(cute::get<1>(partitioned_extra_info)); // (CPY,CPY_M,CPY_K)
if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
return cute::make_tuple(smem_tiled_copy_S, tCrS_copy_view);
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
Tensor tCrZ_copy_view = smem_thr_copy_S.retile_D(cute::get<3>(partitioned_extra_info)); // (CPY,CPY_M,CPY_K)
return cute::make_tuple(smem_tiled_copy_S, tCrS_copy_view, tCrZ_copy_view);
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in A -> RF path.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in A -> RF path.");
}
}
/// Utilities to copy A and extra inputs from smem to RF
template <class SmemTiledCopyA,
class TensorASmemView,
class TensorACopyView,
class... Ts,
class... Us
>
CUTLASS_DEVICE
void copy_A_and_extra_info(
SmemTiledCopyA const& smem_tiled_copy_A,
TensorASmemView const& tCsA,
TensorACopyView& tCrA_copy_view,
cute::tuple<Ts...> const& partitioned_mma_extra_info,
cute::tuple<Us...> const& tiled_copy_and_views,
int k_block,
int read_stage) {
copy(smem_tiled_copy_A, tCsA(_,_,k_block,read_stage), tCrA_copy_view(_,_,k_block));
if (k_block == 0) {
// We are starting a new k-tile so copy the scale
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
// nothing to do
}
else if constexpr (ModeHasScales) {
auto smem_tiled_copy_S = cute::get<0>(tiled_copy_and_views);
auto tCrS_copy_view = cute::get<1>(tiled_copy_and_views);
auto tCsS = cute::get<0>(partitioned_mma_extra_info);
copy(smem_tiled_copy_S, tCsS(_,_,k_block,read_stage), tCrS_copy_view(_,_,k_block));
if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
// Nothing extra to do
} else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
auto tCsZ = cute::get<2>(partitioned_mma_extra_info);
auto tCrZ_copy_view = cute::get<2>(tiled_copy_and_views);
copy(smem_tiled_copy_S, tCsZ(_,_,k_block,read_stage), tCrZ_copy_view(_,_,k_block));
} else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in A -> RF path.");
}
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "Conversion mode not handled in A -> RF path.");
}
}
}
/// Utilities to transform A.
template <class TCrA_load,
int VectorWidthA,
class TCrA_mma,
class... Ts>
CUTLASS_DEVICE
void transform_A_kblock(
TCrA_load const& tCrA_load,
cute::Int<VectorWidthA> vec_A,
TCrA_mma& tCrA_mma,
cute::tuple<Ts...> const& partitioned_extra_info,
int const k_block) {
if constexpr (KernelConversionMode == ConversionMode::DirectConvert) {
transform_internal_A(tCrA_load(_, _, k_block), vec_A, tCrA_mma(_, _, k_block));
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScale) {
auto tCrS = cute::get<1>(partitioned_extra_info);
transform_internal_A(tCrA_load(_, _, k_block), vec_A, make_fragment_like<ElementScale>(tCrA_mma)(_, _, k_block), tCrS(_, _, 0), tCrA_mma(_, _, k_block));
}
else if constexpr (KernelConversionMode == ConversionMode::ConvertAndScaleWithZero) {
auto tCrS = cute::get<1>(partitioned_extra_info);
auto tCrZ = cute::get<3>(partitioned_extra_info);
transform_internal_A(tCrA_load(_, _, k_block),
vec_A,
make_fragment_like<ElementScale>(tCrA_mma)(_, _, k_block),
tCrS(_, _, 0),
tCrZ(_, _, 0),
make_fragment_like<ElementScale>(tCrZ)(_, _, 0),
tCrA_mma(_, _, k_block));
}
else {
static_assert(cutlass::detail::dependent_false<KernelSchedule>, "No A data is loaded.");
}
}
/// Utilities for transforming the A operand prior to issuing tensorcore math.
template <class EngineIn,
class EngineOut,
class TensorLayout,
int ConversionVectorWidth = cosize_v<TensorLayout>>
CUTLASS_DEVICE void
convert_tensor(
Tensor<EngineIn,TensorLayout> const& in,
Tensor<EngineOut,TensorLayout>& out,
cute::Int<ConversionVectorWidth> width = {}) {
/// This is an element-wise conversion where we expect both tensors to have the same layout.
/// As a result, we can cast as a cutlass array to use the fast numeric converters without
/// worrying about indexing into the layout.
constexpr int N = cosize_v<TensorLayout>;
/// The inputs must be backed by registers & be statically sized.
static_assert(is_rmem<EngineIn>::value, "Input tensor for A conversion must come from registers");
static_assert(is_rmem<EngineOut>::value, "Output tensor for A conversion must come from registers");
static_assert(is_static_v<TensorLayout>, "Tensor layout for the conversion must be static");
static_assert(cosize_v<TensorLayout> == size(TensorLayout{}), "Cosize and size of the layout must be equal.");
static_assert(N % ConversionVectorWidth == 0, "Conversion vector width must divide cosize of the tensor layout.");
using SrcType = typename EngineIn::value_type;
using DstType = typename EngineOut::value_type;
using SrcArray = cutlass::Array<SrcType, ConversionVectorWidth>;
using DstArray = cutlass::Array<DstType, ConversionVectorWidth>;
constexpr cutlass::FloatRoundStyle RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
using Converter = cutlass::NumericArrayConverter<DstType, SrcType, ConversionVectorWidth, RoundStyle>;
constexpr int NumIterations = N / ConversionVectorWidth;
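// Reinterpret the register-backed tensors as arrays of ConversionVectorWidth elements and convert
// one chunk per iteration with the vectorized converter.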
for (int ii = 0; ii < NumIterations; ++ii) {
SrcArray const* src_array_ptr = reinterpret_cast<SrcArray const*>(raw_pointer_cast(in.data())) + ii;
DstArray* dst_array_ptr = reinterpret_cast<DstArray*>(raw_pointer_cast(out.data())) + ii;
*dst_array_ptr = Converter::convert(*src_array_ptr);
}
}
template <class EngineIn,
class EngineOut,
class TensorLayout,
int A_VectorConversionWidth>
CUTLASS_DEVICE void
transform_internal_A(
Tensor<EngineIn,TensorLayout>&& in,
cute::Int<A_VectorConversionWidth> a_vec_width,
Tensor<EngineOut,TensorLayout>&& out) {
convert_tensor(in, out, a_vec_width);
}
template <class EngineIn,
class EngineInputBuffer,
class EngineScale,
class EngineOut,
class TensorLayout,
int A_VectorConversionWidth>
CUTLASS_DEVICE void
transform_internal_A(
Tensor<EngineIn,TensorLayout>&& in,
cute::Int<A_VectorConversionWidth> a_vec_width,
Tensor<EngineInputBuffer,TensorLayout>&& converted_inputs,
Tensor<EngineScale,TensorLayout>&& scales,
Tensor<EngineOut,TensorLayout>&& out) {
static_assert(cute::is_same_v<typename EngineInputBuffer::value_type, typename EngineScale::value_type>,
"Type of the engine input buffer must equal the scale buffer");
// First, we upcast the inputs to the scale type
convert_tensor(in, converted_inputs, a_vec_width);
// Apply scales and broadcast across inputs, store in converted_inputs
cute::transform(converted_inputs, scales, converted_inputs, cute::multiplies{});
// Finally, we convert the scaled inputs to the mma type.
convert_tensor(converted_inputs, out);
}
template <class EngineIn,
class EngineInputBuffer,
class EngineScale,
class EngineZero,
class EngineZeroBuffer,
class EngineOut,
class TensorLayout,
int A_VectorConversionWidth>
CUTLASS_DEVICE void
transform_internal_A(
Tensor<EngineIn,TensorLayout>&& in,
cute::Int<A_VectorConversionWidth> a_vec_width,
Tensor<EngineInputBuffer,TensorLayout>&& converted_inputs,
Tensor<EngineScale,TensorLayout>&& scales,
Tensor<EngineZero,TensorLayout>&& zeros,
Tensor<EngineZeroBuffer,TensorLayout>&& converted_zeros,
Tensor<EngineOut,TensorLayout>&& out) {
static_assert(cute::is_same_v<typename EngineInputBuffer::value_type, typename EngineScale::value_type>,
"Type of the engine input buffer must equal the scale buffer");
static_assert(cute::is_same_v<typename EngineZeroBuffer::value_type, typename EngineScale::value_type>,
"Type of the engine zero buffer must equal the scale buffer");
// First, we upcast the inputs to the scale type
convert_tensor(in, converted_inputs, a_vec_width);
convert_tensor(zeros, converted_zeros);
// Apply scales and broadcast across inputs, store in converted_inputs
cute::transform(converted_inputs, scales, converted_inputs, cute::multiplies{});
cute::transform(converted_inputs, converted_zeros, converted_inputs, cute::plus{});
// Finally, we convert the scaled inputs to the mma type.
convert_tensor(converted_inputs, out);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::collective
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/collective/sm90_mma_tma_gmma_rs_warpspecialized_mixed_input.hpp/0 | {"file_path": "include/cutlass/gemm/collective/sm90_mma_tma_gmma_rs_warpspecialized_mixed_input.hpp", "repo_id": "include", "token_count": 27274} | 24 |
/***************************************************************************************************
* Copyright (c) 2024 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a sparse GEMM kernel that computes the absolute maximum of the output tensor
and applies additional scaling factors to operands.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/sparse_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_sparse_with_absmax.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
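//
// Typical host-side flow for this wrapper (a sketch only; `MyGemm` stands for a concrete
// instantiation of SparseGemmWithAbsmax and the variable names are illustrative):
//
//   MyGemm::Arguments args = ...;            // problem size, tensor refs (A, B, C, D, E), epilogue params
//   if (MyGemm::can_implement(args) == cutlass::Status::kSuccess) {
//     size_t workspace_bytes = MyGemm::get_workspace_size(args);
//     // allocate `workspace_bytes` of device memory if non-zero (serial split-K semaphores)
//     MyGemm gemm_op;
//     if (gemm_op.initialize(args, workspace_ptr, stream) == cutlass::Status::kSuccess) {
//       gemm_op.run(stream);                 // or simply gemm_op(args, workspace_ptr, stream)
//     }
//   }
//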
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassSimt,
/// Tag indicating architecture to tune for
typename ArchTag_ = arch::Sm70,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ =
typename threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial = false,
/// Operation performed by GEMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator>
class SparseGemmWithAbsmax {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
using MathOperator = Operator;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Define the kernel
using GemmKernel = typename kernel::DefaultSparseGemmWithAbsmax<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kSplitKSerial,
Operator
>::GemmKernel;
using ElementE = typename GemmKernel::ElementE;
using LayoutE = typename GemmKernel::LayoutE;
static int const kAlignmentE = 128 / sizeof_bits<ElementE>::value;
static int const kSparse = GemmKernel::kSparse;
static int const kMetaSizeInBits = GemmKernel::kMetaSizeInBits;
static int const kElementsPerElementE = GemmKernel::kElementsPerElementE;
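// ElementE / LayoutE describe the sparsity metadata tensor supplied via ref_E: A is stored compressed
// along K by a factor of kSparse, each metadata element covers kElementsPerElementE logical A elements,
// and kMetaSizeInBits gives the metadata bits recorded per A element (see the kernel-level definitions).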
using Arguments = typename GemmKernel::Arguments;
private:
/// Kernel parameters object
typename GemmKernel::Params params_;
public:
/// Constructs the GEMM.
SparseGemmWithAbsmax() { }
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
if (!kSplitKSerial && args.split_k_slices > 1) {
return Status::kErrorInvalidProblem;
}
Status status = GemmKernel::can_implement(
args.problem_size,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
args.ref_C.non_const_ref(),
args.ref_D,
args.ref_E.non_const_ref()
);
if (status != Status::kSuccess) {
return status;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
if (kSplitKSerial && args.split_k_slices > 1) {
bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
}
return bytes;
}
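// When serial split-K is enabled, the workspace holds one int per (m, n) threadblock tile, used as a
// semaphore for the serial reduction; initialize() zero-fills it before launch.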
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
if (kSplitKSerial) {
if (args.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
size_t bytes = get_workspace_size(args);
cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
else {
if (args.split_k_slices > 1) {
return Status::kErrorInvalidProblem;
}
}
// Initialize the Params structure
params_ = typename GemmKernel::Params{
args.problem_size,
grid_shape,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
args.ref_C.non_const_ref(),
args.ref_D,
args.ref_E.non_const_ref(),
args.ref_Aux,
args.ptr_Vector,
args.ldr,
args.epilogue,
static_cast<int *>(workspace)
};
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(Kernel<GemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (kSplitKSerial && args.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
}
params_.ref_A.reset(args.ref_A.non_const_ref().data());
params_.ref_B.reset(args.ref_B.non_const_ref().data());
params_.ref_C.reset(args.ref_C.non_const_ref().data());
params_.ref_D.reset(args.ref_D.data());
params_.ref_E.reset(args.ref_E.non_const_ref().data());
params_.output_op = args.epilogue;
params_.semaphore = static_cast<int *>(workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(GemmKernel::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
cutlass::Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
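
/////////////////////////////////////////////////////////////////////////////////////////////////

// Exposition-only host helper (an illustrative addition, not part of the original header).
// It shows the typical call sequence for a device-level handle such as SparseGemmWithAbsmax:
// check support, initialize with a caller-provided workspace, then run. The concrete template
// instantiation and the construction of `args` and `workspace` are assumed to be handled by
// the caller.
template <typename SparseGemm>
Status run_sparse_gemm_with_absmax_example(
    typename SparseGemm::Arguments const &args,
    void *workspace = nullptr,
    cudaStream_t stream = nullptr) {

  SparseGemm gemm_op;

  // Reject unsupported problems (e.g. split-K slices requested without serial split-K support).
  Status status = SparseGemm::can_implement(args);
  if (status != Status::kSuccess) {
    return status;
  }

  // The caller is expected to have allocated at least get_workspace_size(args) bytes.
  status = gemm_op.initialize(args, workspace, stream);
  if (status != Status::kSuccess) {
    return status;
  }

  // Launch the kernel using the initialized parameter state.
  return gemm_op.run(stream);
}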
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/device/gemm_sparse_with_absmax.h/0 | {
"file_path": "include/cutlass/gemm/device/gemm_sparse_with_absmax.h",
"repo_id": "include",
"token_count": 4371
} | 25 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/arch/arch.h"
#include "cutlass/gemm/gemm.h"
#include "cute/layout.hpp"
#include "cute/numeric/integral_constant.hpp"
//////////////////////////////////////////////////////////////////////////////
namespace cutlass::detail {
template <class T, template <int...> class U>
struct is_kernel_tag_of : cute::false_type {};
template <template <int...> class U, int... Args>
struct is_kernel_tag_of<U<Args...>, U> : cute::true_type {};
template <class T, template <int...> class U>
constexpr bool is_kernel_tag_of_v = is_kernel_tag_of<T, U>::value;
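
// Exposition-only checks (an illustrative addition, not part of the original header):
// the trait reports true only for instantiations of the given int-parameterized template.
template <int... Ns> struct ExampleIntTag {};
static_assert(is_kernel_tag_of_v<ExampleIntTag<3>, ExampleIntTag>,
              "an instantiation of the template matches");
static_assert(!is_kernel_tag_of_v<int, ExampleIntTag>,
              "an unrelated type does not match");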
}
//////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm {
using namespace cute;
//////////////////////////////////////////////////////////////////////////////
namespace detail {
enum class KernelInputTransformType {
FastF32,
InterleavedComplexTF32
};
} // namespace detail
//////////////////////////////////////////////////////////////////////////////
namespace kernel::detail {
// Has_SwapAB<T>::value will be true only if:
// class T has member SwapAB and T::SwapAB is true
template <typename T, typename = void>
struct Has_SwapAB { static constexpr bool value = false; };
template <typename T>
struct Has_SwapAB <T, CUTE_STL_NAMESPACE::void_t<decltype(T::SwapAB)>>
{ static constexpr bool value = T::SwapAB; };
template <typename T>
static constexpr bool Has_SwapAB_v = Has_SwapAB<T>::value;
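
// Exposition-only checks (an illustrative addition, not part of the original header):
// Has_SwapAB_v is true only when a static SwapAB member exists and is itself true.
struct ExampleWithSwapAB { static constexpr bool SwapAB = true; };
struct ExampleWithoutSwapAB { };
static_assert(Has_SwapAB_v<ExampleWithSwapAB>, "SwapAB present and true");
static_assert(!Has_SwapAB_v<ExampleWithoutSwapAB>, "no SwapAB member, trait is false");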
} // namespace kernel::detail
//////////////////////////////////////////////////////////////////////////////
//
// Kernel schedule policies (the base class tags, one for each kernel layer file)
//
struct KernelMultistage { };
struct KernelCpAsyncWarpSpecialized { };
struct KernelCpAsyncWarpSpecializedPingpong { };
struct KernelCpAsyncWarpSpecializedCooperative { };
struct KernelTma { };
struct KernelTmaWarpSpecialized { };
struct KernelTmaWarpSpecializedPingpong { };
struct KernelTmaWarpSpecializedCooperative { };
struct KernelPtrArrayTmaWarpSpecializedCooperative { };
//////////////////////////////////////////////////////////////////////////////
//
// Builder dispatch policies (not a part of the main CUTLASS layers, simply used to opt into
// specific collective builder dispatches)
//
// FP8 related policies (including Fast Accumulation)
struct KernelTmaWarpSpecializedFP8FastAccum : KernelTmaWarpSpecialized { };
struct KernelTmaWarpSpecializedPingpongFP8FastAccum : KernelTmaWarpSpecializedPingpong { };
struct KernelTmaWarpSpecializedCooperativeFP8FastAccum: KernelTmaWarpSpecializedCooperative { };
struct KernelPtrArrayTmaWarpSpecializedCooperativeFP8FastAccum : KernelPtrArrayTmaWarpSpecializedCooperative { };
// Policies to opt into mixed type GEMMs
struct KernelTmaWarpSpecializedMixedInput : KernelTmaWarpSpecialized { };
struct KernelTmaWarpSpecializedPingpongMixedInput : KernelTmaWarpSpecializedPingpong { };
struct KernelTmaWarpSpecializedCooperativeMixedInput: KernelTmaWarpSpecializedCooperative { };
//////////////////////////////////////////////////////////////////////////////
// Policies for dispatch of epilogue
struct EpilogueDefault { };
struct EpilogueTransposed { };
//////////////////////////////////////////////////////////////////////////////
//
// Collective Mainloop Policies
//
// 2 stage pipeline through 1 stage in smem, 1 in rmem, WITHOUT predicated gmem loads
struct MainloopSm70TwoStageUnpredicated {
constexpr static int Stages = 2;
using ArchTag = arch::Sm70;
using Schedule = KernelMultistage;
using ClusterShape = Shape<_1,_1,_1>;
};
// 2 stage pipeline through 1 stage in smem, 1 in rmem, with predicated gmem loads
struct MainloopSm70TwoStage {
constexpr static int Stages = 2;
using ArchTag = arch::Sm70;
using Schedule = KernelMultistage;
using ClusterShape = Shape<_1,_1,_1>;
};
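// Note: the two Sm70 policies carry identical members; whether gmem loads are predicated is
// determined by which collective mainloop specialization is selected for the tag.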
// n-buffer in smem (cp.async), pipelined with registers, WITHOUT predicated gmem loads
template<int Stages_>
struct MainloopSm80CpAsyncUnpredicated {
constexpr static int Stages = Stages_;
using ArchTag = arch::Sm80;
using Schedule = KernelMultistage;
using ClusterShape = Shape<_1,_1,_1>;
};
// n-buffer in smem (cp.async), pipelined with registers, with predicated gmem loads
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>
>
struct MainloopSm80CpAsync {
constexpr static int Stages = Stages_;
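  // A cluster spanning more than one CTA requires cluster launch, which is only available on
  // SM90; otherwise this cp.async policy targets SM80.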
using ArchTag = cute::conditional_t<(size(ClusterShape_{}) > 1), arch::Sm90, arch::Sm80>;
using Schedule = KernelMultistage;
using ClusterShape = ClusterShape_;
};
// n-buffer in smem (cp.async), pipelined with Hopper GMMA, with predicated gmem loads, warp specialized dynamic schedule
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>,
class KernelSchedule = KernelCpAsyncWarpSpecialized
>
struct MainloopSm90CpAsyncGmmaWarpSpecialized {
constexpr static int Stages = Stages_;
using ClusterShape = ClusterShape_;
using ArchTag = arch::Sm90;
using Schedule = KernelSchedule;
};
// n-buffer in smem (cp.async), pipelined with Hopper GMMA, with predicated gmem loads, warp specialized dynamic schedule, with GMMA's operand A sourced from registers
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>,
class KernelSchedule = KernelCpAsyncWarpSpecialized
>
struct MainloopSm90CpAsyncGmmaRmemAWarpSpecialized {
constexpr static int Stages = Stages_;
using ClusterShape = ClusterShape_;
using ArchTag = arch::Sm90;
using Schedule = KernelSchedule;
};
// n-buffer in smem (Hopper TMA), pipelined with Hopper GMMA and TMA, static schedule between TMA and GMMA
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>,
int PipelineAsyncMmaStages_ = 1
>
struct MainloopSm90TmaGmma {
constexpr static int Stages = Stages_;
using ClusterShape = ClusterShape_;
constexpr static int PipelineAsyncMmaStages = PipelineAsyncMmaStages_;
using ArchTag = arch::Sm90;
using Schedule = KernelTma;
};
// n-buffer in smem (Hopper TMA), pipelined with Hopper GMMA and TMA, Warp specialized dynamic schedule
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>,
class KernelSchedule = KernelTmaWarpSpecializedCooperative
>
struct MainloopSm90TmaGmmaWarpSpecialized {
constexpr static int Stages = Stages_;
using ClusterShape = ClusterShape_;
using ArchTag = arch::Sm90;
using Schedule = KernelSchedule;
};
// n-buffer in smem (Hopper TMA), pipelined with Hopper GMMA and TMA, Warp specialized dynamic schedule
// With GMMA's A data from registers.
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>,
class KernelSchedule = KernelTmaWarpSpecialized
>
struct MainloopSm90TmaGmmaRmemAWarpSpecialized {
constexpr static int Stages = Stages_;
using ClusterShape = ClusterShape_;
using ArchTag = arch::Sm90;
using Schedule = KernelSchedule;
static_assert(
cute::is_same_v<Schedule, KernelTmaWarpSpecialized> ||
cute::is_same_v<Schedule, KernelTmaWarpSpecializedPingpong> ||
cute::is_same_v<Schedule, KernelTmaWarpSpecializedCooperative>,
"KernelSchedule must be one of the warp specialized policies");
};
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>,
class KernelSchedule = KernelTmaWarpSpecialized
>
struct MainloopSm90TmaGmmaRmemAWarpSpecializedMixedInput {
constexpr static int Stages = Stages_;
using ClusterShape = ClusterShape_;
using ArchTag = arch::Sm90;
using Schedule = KernelSchedule;
static_assert(
cute::is_same_v<Schedule, KernelTmaWarpSpecialized> ||
cute::is_same_v<Schedule, KernelTmaWarpSpecializedMixedInput> ||
cute::is_same_v<Schedule, KernelTmaWarpSpecializedPingpong> ||
cute::is_same_v<Schedule, KernelTmaWarpSpecializedPingpongMixedInput> ||
cute::is_same_v<Schedule, KernelTmaWarpSpecializedCooperative> ||
cute::is_same_v<Schedule, KernelTmaWarpSpecializedCooperativeMixedInput>,
"KernelSchedule must be one of the warp specialized policies");
};
// n-buffer in smem (Hopper TMA), pipelined with Hopper GMMA and TMA, Warp specialized dynamic schedule
// For FP8 kernels
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>,
class KernelSchedule = KernelTmaWarpSpecialized
>
struct MainloopSm90TmaGmmaWarpSpecializedFP8
: MainloopSm90TmaGmmaWarpSpecialized<Stages_, ClusterShape_, KernelSchedule> {
static_assert(
cute::is_same_v<KernelSchedule, KernelTmaWarpSpecialized> ||
cute::is_same_v<KernelSchedule, KernelTmaWarpSpecializedPingpong> ||
cute::is_same_v<KernelSchedule, KernelTmaWarpSpecializedCooperative>,
"KernelSchedule must be one of the warp specialized policies");
};
// n-buffer in smem (Hopper TMA), pipelined with Hopper GMMA and TMA, Warp specialized dynamic schedule for Ptr-Array and Grouped Gemm
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>,
class KernelSchedule = KernelPtrArrayTmaWarpSpecializedCooperative
>
struct MainloopSm90ArrayTmaGmmaWarpSpecialized {
constexpr static int Stages = Stages_;
using ClusterShape = ClusterShape_;
using ArchTag = arch::Sm90;
using Schedule = KernelSchedule;
static_assert(
cute::is_base_of_v<KernelPtrArrayTmaWarpSpecializedCooperative, KernelSchedule>,
"KernelSchedule must be one of the Ptr-Array or Grouped Gemm TMA Warp Specialized Cooperative policies");
};
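
// Exposition-only example (an illustrative addition, not part of the original header):
// dispatch policies are plain tag structs, so a concrete mainloop policy is simply an alias.
// Here: 4 smem stages, a 2x1x1 cluster, and the cooperative TMA warp-specialized schedule.
using ExampleMainloopPolicy =
    MainloopSm90TmaGmmaWarpSpecialized<4, Shape<_2,_1,_1>, KernelTmaWarpSpecializedCooperative>;
static_assert(ExampleMainloopPolicy::Stages == 4, "Stages is carried through the policy");
static_assert(cute::is_same_v<ExampleMainloopPolicy::Schedule, KernelTmaWarpSpecializedCooperative>,
              "the kernel schedule tag is exposed as Schedule");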
//////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm
| include/cutlass/gemm/dispatch_policy.hpp/0 | {
"file_path": "include/cutlass/gemm/dispatch_policy.hpp",
"repo_id": "include",
"token_count": 3361
} | 26 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Kernel-level planar complex GEMM in which the real and imaginary parts of each operand are stored as separate planes.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmPlanarComplex {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
using Operator = typename Mma::Operator;
using ArchTag = typename Mma::ArchTag;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
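  /// (for example, 8 elements when both A and B are 16-bit types, i.e. one 128-bit access)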
static int const kSplitKAlignment = const_max(
128 / sizeof_bits<ElementA>::value,
128 / sizeof_bits<ElementB>::value);
//
// Additional types needed for reflection
//
using ElementAccumulator = typename Mma::Policy::Operator::ElementC;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::Shape;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
//
// Arguments structure
//
/// Argument structure
struct Arguments : UniversalArgumentsBase
{
//
// Data members
//
typename EpilogueOutputOp::Params epilogue{};
void const * ptr_A_real{nullptr};
void const * ptr_A_imag{nullptr};
void const * ptr_B_real{nullptr};
void const * ptr_B_imag{nullptr};
void const * ptr_C_real{nullptr};
void const * ptr_C_imag{nullptr};
void * ptr_D_real{nullptr};
void * ptr_D_imag{nullptr};
typename LayoutA::Stride::Index lda_real{};
typename LayoutA::Stride::Index lda_imag{};
typename LayoutB::Stride::Index ldb_real{};
typename LayoutB::Stride::Index ldb_imag{};
typename LayoutC::Stride::Index ldc_real{};
typename LayoutC::Stride::Index ldc_imag{};
typename LayoutC::Stride::Index ldd_real{};
typename LayoutC::Stride::Index ldd_imag{};
int64_t batch_stride_A{0};
int64_t batch_stride_A_imag{0};
int64_t batch_stride_B{0};
int64_t batch_stride_B_imag{0};
int64_t batch_stride_C{0};
int64_t batch_stride_C_imag{0};
int64_t batch_stride_D_imag{0};
//
// Methods
//
Arguments() = default;
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A_real,
void const * ptr_A_imag,
void const * ptr_B_real,
void const * ptr_B_imag,
void const * ptr_C_real,
void const * ptr_C_imag,
void * ptr_D_real,
void * ptr_D_imag,
typename LayoutA::Stride::Index lda_real,
typename LayoutA::Stride::Index lda_imag,
typename LayoutB::Stride::Index ldb_real,
typename LayoutB::Stride::Index ldb_imag,
typename LayoutC::Stride::Index ldc_real,
typename LayoutC::Stride::Index ldc_imag,
typename LayoutC::Stride::Index ldd_real,
typename LayoutC::Stride::Index ldd_imag,
int64_t batch_stride_A = 0,
int64_t batch_stride_A_imag = 0,
int64_t batch_stride_B = 0,
int64_t batch_stride_B_imag = 0,
int64_t batch_stride_C = 0,
int64_t batch_stride_C_imag = 0,
int64_t batch_stride_D = 0,
int64_t batch_stride_D_imag = 0)
:
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A_real(ptr_A_real),
ptr_A_imag(ptr_A_imag),
ptr_B_real(ptr_B_real),
ptr_B_imag(ptr_B_imag),
ptr_C_real(ptr_C_real),
ptr_C_imag(ptr_C_imag),
ptr_D_real(ptr_D_real),
ptr_D_imag(ptr_D_imag),
lda_real(lda_real),
lda_imag(lda_imag),
ldb_real(ldb_real),
ldb_imag(ldb_imag),
ldc_real(ldc_real),
ldc_imag(ldc_imag),
ldd_real(ldd_real),
ldd_imag(ldd_imag),
batch_stride_A(batch_stride_A),
batch_stride_A_imag(batch_stride_A_imag),
batch_stride_B(batch_stride_B),
batch_stride_B_imag(batch_stride_B_imag),
batch_stride_C(batch_stride_C),
batch_stride_C_imag(batch_stride_C_imag),
batch_stride_D_imag(batch_stride_D_imag)
{}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A_real, args.ptr_B_real);
std::swap(args.ptr_A_imag, args.ptr_B_imag);
std::swap(args.lda_real, args.ldb_real);
std::swap(args.lda_imag, args.ldb_imag);
std::swap(args.batch_stride_A, args.batch_stride_B);
std::swap(args.batch_stride_A_imag, args.batch_stride_B_imag);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>;
//
// Data members
//
typename Mma::IteratorA::Params params_A_real{};
typename Mma::IteratorA::Params params_A_imag{};
typename Mma::IteratorB::Params params_B_real{};
typename Mma::IteratorB::Params params_B_imag{};
typename Epilogue::OutputTileIterator::Params params_C_real{};
typename Epilogue::OutputTileIterator::Params params_C_imag{};
typename Epilogue::OutputTileIterator::Params params_D_real{};
typename Epilogue::OutputTileIterator::Params params_D_imag{};
typename EpilogueOutputOp::Params output_op{};
void * ptr_A_real{nullptr};
void * ptr_A_imag{nullptr};
void * ptr_B_real{nullptr};
void * ptr_B_imag{nullptr};
void * ptr_C_real{nullptr};
void * ptr_C_imag{nullptr};
void * ptr_D_real{nullptr};
void * ptr_D_imag{nullptr};
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
int64_t batch_stride_C{0};
int64_t batch_stride_A_imag{0};
int64_t batch_stride_B_imag{0};
int64_t batch_stride_C_imag{0};
int64_t batch_stride_D_imag{0};
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_A_real(args.lda_real),
params_A_imag(args.lda_imag),
params_B_real(args.ldb_real),
params_B_imag(args.ldb_imag),
params_C_real(args.ldc_real),
params_C_imag(args.ldc_imag),
params_D_real(args.ldd_real),
params_D_imag(args.ldd_imag),
output_op(args.epilogue),
ptr_A_real(const_cast<void *>(args.ptr_A_real)),
ptr_A_imag(const_cast<void *>(args.ptr_A_imag)),
ptr_B_real(const_cast<void *>(args.ptr_B_real)),
ptr_B_imag(const_cast<void *>(args.ptr_B_imag)),
ptr_C_real(const_cast<void *>(args.ptr_C_real)),
ptr_C_imag(const_cast<void *>(args.ptr_C_imag)),
ptr_D_real(args.ptr_D_real),
ptr_D_imag(args.ptr_D_imag),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_A_imag(args.batch_stride_A_imag),
batch_stride_B_imag(args.batch_stride_B_imag),
batch_stride_C_imag(args.batch_stride_C_imag),
batch_stride_D_imag(args.batch_stride_D_imag)
{}
/// Returns the workspace size (in bytes) needed for this problem geometry
size_t get_workspace_size() const
{
size_t workspace_bytes = ParamsBase::get_workspace_size();
if (this->mode == GemmUniversalMode::kGemmSplitKParallel)
{
// Double the size returned by the base class because we need to
// accumulate two ElementC components
workspace_bytes *= 2;
}
return workspace_bytes;
}
/// Lightweight update given a subset of arguments.
void update(Arguments const &args)
{
ptr_A_real = const_cast<void *>(args.ptr_A_real);
ptr_A_imag = const_cast<void *>(args.ptr_A_imag);
ptr_B_real = const_cast<void *>(args.ptr_B_real);
ptr_B_imag = const_cast<void *>(args.ptr_B_imag);
ptr_C_real = const_cast<void *>(args.ptr_C_real);
ptr_C_imag = const_cast<void *>(args.ptr_C_imag);
ptr_D_real = const_cast<void *>(args.ptr_D_real);
ptr_D_imag = const_cast<void *>(args.ptr_D_imag);
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
batch_stride_C = args.batch_stride_C;
this->batch_stride_D = args.batch_stride_D;
batch_stride_A_imag = args.batch_stride_A_imag;
batch_stride_B_imag = args.batch_stride_B_imag;
batch_stride_C_imag = args.batch_stride_C_imag;
batch_stride_D_imag = args.batch_stride_D_imag;
output_op = args.epilogue;
}
};
/// Shared memory storage structure
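  /// Mainloop and epilogue shared memory are unioned because their uses do not overlap in time.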
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(Arguments const &args)
{
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = args.problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = args.problem_size.m() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = args.problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = args.problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = args.problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = args.problem_size.m() % kAlignmentC;
}
if (isAMisaligned || isBMisaligned || isCMisaligned) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmPlanarComplex op;
op(params, shared_storage);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A_real = static_cast<ElementA *>(params.ptr_A_real);
ElementA *ptr_A_imag = static_cast<ElementA *>(params.ptr_A_imag);
ElementB *ptr_B_real = static_cast<ElementB *>(params.ptr_B_real);
ElementB *ptr_B_imag = static_cast<ElementB *>(params.ptr_B_imag);
//
// Fetch pointers based on mode.
//
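    // For the split-K modes, this threadblock covers the K range [offset_k, problem_size_k):
    // offset_k is the start of its partition, and problem_size_k is clipped to the next
    // partition boundary for every partition except the last, which extends to K.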
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A_real += int64_t(threadblock_tile_offset.k()) * params.batch_stride_A;
ptr_A_imag += int64_t(threadblock_tile_offset.k()) * params.batch_stride_A_imag;
ptr_B_real += int64_t(threadblock_tile_offset.k()) * params.batch_stride_B;
ptr_B_imag += int64_t(threadblock_tile_offset.k()) * params.batch_stride_B_imag;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A_real = static_cast<ElementA * const *>(params.ptr_A_real)[threadblock_tile_offset.k()];
ptr_A_imag = static_cast<ElementA * const *>(params.ptr_A_imag)[threadblock_tile_offset.k()];
ptr_B_real = static_cast<ElementB * const *>(params.ptr_B_real)[threadblock_tile_offset.k()];
ptr_B_imag = static_cast<ElementB * const *>(params.ptr_B_imag)[threadblock_tile_offset.k()];
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A_real(
params.params_A_real,
ptr_A_real,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorA iterator_A_imag(
params.params_A_imag,
ptr_A_imag,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B_real(
params.params_B_real,
ptr_B_real,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
typename Mma::IteratorB iterator_B_imag(
params.params_B_imag,
ptr_B_imag,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
    // Compute the number of threadblock-scoped k iterations (ceiling division by the tile's K extent)
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A_real,
iterator_A_imag,
iterator_B_real,
iterator_B_imag,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
    // assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C_real = static_cast<ElementC *>(params.ptr_C_real);
ElementC *ptr_C_imag = static_cast<ElementC *>(params.ptr_C_imag);
ElementC *ptr_D_real = static_cast<ElementC *>(params.ptr_D_real);
ElementC *ptr_D_imag = static_cast<ElementC *>(params.ptr_D_imag);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D_real += threadblock_tile_offset.k() * params.batch_stride_D;
ptr_D_imag += threadblock_tile_offset.k() * params.batch_stride_D_imag;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C_real += int64_t(threadblock_tile_offset.k()) * params.batch_stride_C;
ptr_C_imag += int64_t(threadblock_tile_offset.k()) * params.batch_stride_C_imag;
ptr_D_real += int64_t(threadblock_tile_offset.k()) * params.batch_stride_D;
ptr_D_imag += int64_t(threadblock_tile_offset.k()) * params.batch_stride_D_imag;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C_real = static_cast<ElementC * const *>(params.ptr_C_real)[threadblock_tile_offset.k()];
ptr_C_imag = static_cast<ElementC * const *>(params.ptr_C_imag)[threadblock_tile_offset.k()];
ptr_D_real = static_cast<ElementC * const *>(params.ptr_D_real)[threadblock_tile_offset.k()];
ptr_D_imag = static_cast<ElementC * const *>(params.ptr_D_imag)[threadblock_tile_offset.k()];
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C_real(
params.params_C_real,
ptr_C_real,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
typename Epilogue::OutputTileIterator iterator_C_imag(
params.params_C_imag,
ptr_C_imag,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D_real(
params.params_D_real,
ptr_D_real,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
typename Epilogue::OutputTileIterator iterator_D_imag(
params.params_D_imag,
ptr_D_imag,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
//
// Construct epilogue
//
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C_real = iterator_D_real;
iterator_C_imag = iterator_D_imag;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D_real,
iterator_D_imag,
accumulators,
iterator_C_real,
iterator_C_imag);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/gemm_planar_complex.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm_planar_complex.h",
"repo_id": "include",
"token_count": 9363
} | 27 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Kernel-level GEMV operators specialized for column-major and row-major A matrices.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/arch/memory.h"
#include "cutlass/arch/cache_operation.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_conversion.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA_,
typename LayoutA_,
typename ElementB_,
typename ElementC_,
typename ElementAccumulator_,
typename EpilogueOutputOp_,
int kElementsPerAccess_ = 1, ///< Number of elements involved in a global access.
int kThreadCount_ = 0, ///< Number of threads in the thread block.
/// It will be calculated automatically if set to 0.
int kThreadsPerRow_ = 0 ///< Number of threads in the k dimension.
/// It will be calculated automatically if set to 0.
>
struct Gemv;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Specializations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
// GEMV for column-major A matrix
template <
typename ElementA_,
typename ElementB_,
typename ElementC_,
typename ElementAccumulator_,
typename EpilogueOutputOp_,
int kElementsPerAccess_,
int kThreadCount_,
int kThreadsPerRow_
>
struct Gemv <
ElementA_,
layout::ColumnMajor,
ElementB_,
ElementC_,
ElementAccumulator_,
EpilogueOutputOp_,
kElementsPerAccess_,
kThreadCount_,
kThreadsPerRow_
>{
public:
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using TensorRefA = TensorRef<ElementA, LayoutA>;
using ElementB = ElementB_;
using ElementC = ElementC_;
using ElementAccumulator = ElementAccumulator_;
using EpilogueOutputOp = EpilogueOutputOp_;
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
// thread block shape (kThreadCount, 1, 1)
static int const kThreadCount = (kThreadCount_ <= 0) ? 32 : kThreadCount_;
static int const kThreadsPerRow = (kThreadsPerRow_ <= 0) ? 1 : kThreadsPerRow_;
static int const kStages = 1;
static int const kAlignmentA = 1;
static int const kAlignmentB = 1;
static int const kAlignmentC = 1;
//
// Structures
//
/// Argument structure
struct Arguments {
MatrixCoord problem_size;
int32_t batch_count;
typename EpilogueOutputOp::Params output_op;
TensorRefA ref_A;
ElementB const *ptr_B;
ElementC const *ptr_C;
ElementC *ptr_D;
int64_t inc_B;
int64_t inc_C;
int64_t inc_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
//
// Methods
//
Arguments(): batch_count(0) { }
Arguments(
MatrixCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params output_op,
TensorRefA ref_A,
void const *ptr_B,
void const *ptr_C,
void *ptr_D,
int64_t inc_B,
int64_t inc_C,
int64_t inc_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D
):
problem_size(problem_size),
batch_count(batch_count),
output_op(output_op),
ref_A(ref_A),
ptr_B(static_cast<ElementB const *>(ptr_B)),
ptr_C(static_cast<ElementC const *>(ptr_C)),
ptr_D(static_cast<ElementC *>(ptr_D)),
inc_B(inc_B),
inc_C(inc_C),
inc_D(inc_D),
batch_stride_A(batch_stride_A),
batch_stride_B(batch_stride_B),
batch_stride_C(batch_stride_C),
batch_stride_D(batch_stride_D)
{ }
Arguments(
MatrixCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params output_op,
TensorRefA ref_A,
void const *ptr_B,
void const *ptr_C,
void *ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D
):
Arguments(
problem_size,
batch_count,
output_op,
ref_A,
ptr_B,
ptr_C,
ptr_D,
1,
1,
1,
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D)
{ }
Arguments(
MatrixCoord problem_size,
typename EpilogueOutputOp::Params output_op,
TensorRefA ref_A,
void const *ptr_B,
void const *ptr_C,
void *ptr_D,
int64_t inc_B,
int64_t inc_C,
int64_t inc_D
):
Arguments(
problem_size,
1,
output_op,
ref_A,
ptr_B,
ptr_C,
ptr_D,
inc_B,
inc_C,
inc_D,
1,
1,
1,
1)
{ }
Status update(Arguments const &args) {
output_op = args.output_op;
      ref_A = args.ref_A;
ptr_B = args.ptr_B;
ptr_C = args.ptr_C;
ptr_D = args.ptr_D;
return Status::kSuccess;
}
};
using Params = Arguments;
/// Shared memory storage structure
union SharedStorage {
};
public:
//
// Methods
//
CUTLASS_DEVICE
Gemv() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(cutlass::MatrixCoord const & problem_size) {
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
/// Executes one GEMV
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Loop over batch indices
for (int batch_idx = blockIdx.z; batch_idx < params.batch_count; batch_idx += gridDim.z) {
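      // One thread per output row; with column-major A, the threads of a warp read consecutive
      // rows of the current column, so loads of A are coalesced.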
int i = blockIdx.x * kThreadCount + threadIdx.x;
ElementA const *ptr_A = params.ref_A.data() + i;
ElementB const *ptr_B = params.ptr_B;
ptr_A += batch_idx * params.batch_stride_A;
ptr_B += batch_idx * params.batch_stride_B;
ElementAccumulator accum = ElementAccumulator();
// Compute inner product
CUTLASS_PRAGMA_NO_UNROLL
for (int k = 0; k < params.problem_size.column(); ++k) {
// Fetch from A
ElementA a = ElementA();
if (i < params.problem_size.row()) {
a = *ptr_A;
}
ptr_A += params.ref_A.stride(0);
// Fetch from B
ElementB b = *ptr_B;
ptr_B += params.inc_B;
// Math
accum += ElementAccumulator(a) * ElementAccumulator(b);
}
//
// Epilogue phase
//
ElementC const *ptr_C = params.ptr_C + i * params.inc_C + batch_idx * params.batch_stride_C;
ElementC *ptr_D = params.ptr_D + i * params.inc_D + batch_idx * params.batch_stride_D;
EpilogueOutputOp output_op(params.output_op);
typename EpilogueOutputOp::FragmentAccumulator accum_fragment;
typename EpilogueOutputOp::FragmentOutput source_fragment;
typename EpilogueOutputOp::FragmentOutput output_fragment;
accum_fragment[0] = accum;
if (i < params.problem_size.row()) {
if (output_op.is_source_needed()) {
source_fragment[0] = *ptr_C;
output_fragment = output_op(accum_fragment, source_fragment);
}
else {
output_fragment = output_op(accum_fragment);
}
*ptr_D = output_fragment[0];
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// GEMV for row-major A matrix
template <
typename ElementA_,
typename ElementB_,
typename ElementC_,
typename ElementAccumulator_,
typename EpilogueOutputOp_,
int kElementsPerAccess_,
int kThreadCount_,
int kThreadsPerRow_
>
struct Gemv <
ElementA_,
layout::RowMajor,
ElementB_,
ElementC_,
ElementAccumulator_,
EpilogueOutputOp_,
kElementsPerAccess_,
kThreadCount_,
kThreadsPerRow_
>{
public:
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using TensorRefA = TensorRef<ElementA, LayoutA>;
using ElementB = ElementB_;
using ElementC = ElementC_;
using ElementAccumulator = ElementAccumulator_;
using EpilogueOutputOp = EpilogueOutputOp_;
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
static FloatRoundStyle const Round = cutlass::FloatRoundStyle::round_to_nearest;
  // number of elements loaded per vectorized global memory access
static int const kElementsPerAccess = kElementsPerAccess_;
using FragmentA = Array<ElementA, kElementsPerAccess>;
using FragmentB = Array<ElementB, kElementsPerAccess>;
using FragmentCompute = Array<ElementAccumulator, kElementsPerAccess>;
// thread block shape (kThreadsPerRow, kThreadCount / kThreadsPerRow, 1)
static int const kThreadCount = (kThreadCount_ <= 0) ? 128 : kThreadCount_;
static int const kThreadsPerRow = (kThreadsPerRow_ <= 0) ?
std::min(static_cast<int>(kThreadCount / (kElementsPerAccess * sizeof(ElementA))), 16)
: kThreadsPerRow_;
//
// Structures
//
/// Argument structure
struct Arguments {
MatrixCoord problem_size;
int32_t batch_count;
typename EpilogueOutputOp::Params output_op;
TensorRefA ref_A;
ElementB const *ptr_B;
ElementC const *ptr_C;
ElementC *ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
//
// Methods
//
Arguments(): batch_count(0) { }
Arguments(
MatrixCoord problem_size,
int32_t batch_count,
typename EpilogueOutputOp::Params output_op,
TensorRefA ref_A,
void const *ptr_B,
void const *ptr_C,
void *ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D
):
problem_size(problem_size),
batch_count(batch_count),
output_op(output_op),
ref_A(ref_A),
ptr_B(static_cast<ElementB const *>(ptr_B)),
ptr_C(static_cast<ElementC const *>(ptr_C)),
ptr_D(static_cast<ElementC *>(ptr_D)),
batch_stride_A(batch_stride_A),
batch_stride_B(batch_stride_B),
batch_stride_C(batch_stride_C),
batch_stride_D(batch_stride_D)
{ }
Arguments(
MatrixCoord problem_size,
typename EpilogueOutputOp::Params output_op,
TensorRefA ref_A,
void const *ptr_B,
void const *ptr_C,
void *ptr_D
):
Arguments(
problem_size,
1,
output_op,
ref_A,
ptr_B,
ptr_C,
ptr_D,
1,
1,
1,
1)
{ }
Status update(Arguments const &args) {
problem_size = args.problem_size;
batch_count = args.batch_count;
output_op = args.output_op;
      ref_A = args.ref_A;
ptr_B = args.ptr_B;
ptr_C = args.ptr_C;
ptr_D = args.ptr_D;
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
batch_stride_C = args.batch_stride_C;
batch_stride_D = args.batch_stride_D;
return Status::kSuccess;
}
};
using Params = Arguments;
/// Shared memory storage structure
union SharedStorage {
};
public:
//
// Methods
//
CUTLASS_DEVICE
Gemv() {}
/// Determines whether kernel satisfies alignment
static Status can_implement(cutlass::MatrixCoord const &problem_size) {
if (problem_size.column() % kElementsPerAccess != 0) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
/// Executes one GEMV
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Loop over batch indices
for (int batch_idx = blockIdx.z; batch_idx < params.batch_count; batch_idx += gridDim.z) {
int idx_col_k = threadIdx.x;
int idx_row_m = blockIdx.x * blockDim.y + threadIdx.y;
if (idx_row_m < params.problem_size.row()) {
// problem_size (row = m, column = k)
// matrix A (batch, m, k)
// vector B (batch, 1, k)
// vector C (batch, m, 1)
// vector D (batch, m, 1)
// move in the batch dimension
ElementA const *ptr_A = params.ref_A.data() + batch_idx * params.batch_stride_A;
ElementB const *ptr_B = params.ptr_B + batch_idx * params.batch_stride_B;
ElementC const *ptr_C = params.ptr_C + batch_idx * params.batch_stride_C;
ElementC *ptr_D = params.ptr_D + batch_idx * params.batch_stride_D;
// move in the k dimension
ptr_A += idx_col_k * kElementsPerAccess;
ptr_B += idx_col_k * kElementsPerAccess;
// move in the m dimension
ptr_A += idx_row_m * params.problem_size.column();
ptr_C += idx_row_m;
ptr_D += idx_row_m;
NumericArrayConverter<ElementAccumulator, ElementA, kElementsPerAccess, Round> srcA_converter;
NumericArrayConverter<ElementAccumulator, ElementB, kElementsPerAccess, Round> srcB_converter;
ElementAccumulator accum = 0.f;
FragmentB fragB;
FragmentA fragA;
int unroll_col_k = 0;
        // number of k elements covered per iteration by the threads that share one output row
int const tileA_k = kThreadsPerRow * kElementsPerAccess;
for (; unroll_col_k < params.problem_size.column() / tileA_k * tileA_k; unroll_col_k += tileA_k) {
// fetch from matrix A
arch::global_load<FragmentA,
sizeof(FragmentA),
arch::CacheOperation::LastUse>(fragA, (ptr_A + unroll_col_k), true);
// fetch from vector B
arch::global_load<FragmentB,
sizeof(FragmentB),
arch::CacheOperation::Always>(fragB, (ptr_B + unroll_col_k), true);
FragmentCompute fragB_Compute = srcB_converter(fragB);
FragmentCompute fragA_Compute = srcA_converter(fragA);
// Math
CUTLASS_PRAGMA_UNROLL
for (int e = 0; e < kElementsPerAccess; e++) {
accum += fragA_Compute.at(e) * fragB_Compute.at(e);
}
}
        // Process the residual k elements left over from the vectorized loop;
        // each thread fetches one element at a time.
for (int k = unroll_col_k + idx_col_k; k < params.problem_size.column(); k += kThreadsPerRow) {
ElementB b = *(ptr_B - idx_col_k * kElementsPerAccess + k);
ElementA a = *(ptr_A - idx_col_k * kElementsPerAccess + k);
accum += ElementAccumulator(a) * ElementAccumulator(b);
}
EpilogueOutputOp output_op(params.output_op);
typename EpilogueOutputOp::FragmentOutput source_fragment;
// prefetch from source matrix C
if (output_op.is_source_needed()) {
source_fragment[0] = *(ptr_C);
}
typename EpilogueOutputOp::FragmentAccumulator accum_fragment;
typename EpilogueOutputOp::FragmentOutput output_fragment;
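        // Butterfly (XOR) shuffle reduction across the kThreadsPerRow lanes cooperating on this
        // output row; afterwards each lane in the group holds the full dot product and the lane
        // with idx_col_k == 0 writes the result.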
for (int mask = (kThreadsPerRow >> 1); mask > 0; mask >>= 1) {
accum += __shfl_xor_sync(0xFFFFFFFF, accum, mask, 32);
}
if (idx_col_k == 0) {
accum_fragment[0] = accum;
if (output_op.is_source_needed()) {
output_fragment = output_op(accum_fragment, source_fragment);
}
else {
output_fragment = output_op(accum_fragment);
}
*ptr_D = output_fragment[0];
}
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/gemv.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemv.h",
"repo_id": "include",
"token_count": 7933
} | 28 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/kernel_hardware_info.hpp"
#include "cute/arch/cluster_sm90.hpp"
#include "cutlass/arch/reg_reconfig.h"
#include "cutlass/arch/mma_sm90.h"
#include "cutlass/epilogue/collective/detail.hpp"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp"
#include "cutlass/pipeline/pipeline.hpp"
#include "cute/tensor.hpp"
///////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::kernel {
///////////////////////////////////////////////////////////////////////////////
template <
class ProblemShape_,
class CollectiveMainloop_,
class CollectiveEpilogue_,
class TileScheduler_
>
class GemmUniversal<
ProblemShape_,
CollectiveMainloop_,
CollectiveEpilogue_,
TileScheduler_,
cute::enable_if_t<cute::is_base_of_v<KernelCpAsyncWarpSpecialized, typename CollectiveMainloop_::DispatchPolicy::Schedule>>>
{
public:
//
// Type Aliases
//
using ProblemShape = ProblemShape_;
static_assert(cute::rank(ProblemShape{}) == 3 or cute::rank(ProblemShape{}) == 4,
"ProblemShape{} should be <M,N,K> or <M,N,K,L>");
// Mainloop derived types
using CollectiveMainloop = CollectiveMainloop_;
using TileShape = typename CollectiveMainloop::TileShape;
using TiledMma = typename CollectiveMainloop::TiledMma;
using ArchTag = typename CollectiveMainloop::ArchTag;
using ElementA = typename CollectiveMainloop::ElementA;
using StrideA = typename CollectiveMainloop::StrideA;
using ElementB = typename CollectiveMainloop::ElementB;
using StrideB = typename CollectiveMainloop::StrideB;
using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy;
using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator;
using ClusterShape = typename DispatchPolicy::ClusterShape;
using MainloopArguments = typename CollectiveMainloop::Arguments;
using MainloopParams = typename CollectiveMainloop::Params;
static_assert(ArchTag::kMinComputeCapability >= 90);
// Epilogue derived types
using CollectiveEpilogue = CollectiveEpilogue_;
using ElementC = typename CollectiveEpilogue::ElementC;
using StrideC = typename CollectiveEpilogue::StrideC;
using ElementD = typename CollectiveEpilogue::ElementD;
using StrideD = typename CollectiveEpilogue::StrideD;
using EpilogueArguments = typename CollectiveEpilogue::Arguments;
using EpilogueParams = typename CollectiveEpilogue::Params;
static_assert(cute::is_void_v<TileScheduler_> or cute::is_same_v<TileScheduler_, PersistentScheduler>,
"Non-persistent warp-specialized kernel does not support specializing the tile scheduler.");
using TileSchedulerTag = TileScheduler_;
using TileScheduler = typename detail::TileSchedulerSelector<
TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler;
using TileSchedulerArguments = typename TileScheduler::Arguments;
// Kernel level shared memory storage
struct SharedStorage {
union TensorStorage {
using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage;
using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage;
MainloopTensorStorage mainloop;
EpilogueTensorStorage epilogue;
} tensors;
struct PipelineStorage : cute::aligned_struct<16> {
using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage;
using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage;
alignas(16) MainloopPipelineStorage mainloop;
alignas(16) EpiLoadPipelineStorage epi_load;
} pipelines;
};
static constexpr int SharedStorageSize = sizeof(SharedStorage);
using GmemTiledCopyA = typename CollectiveMainloop::GmemTiledCopyA;
using GmemTiledCopyB = typename CollectiveMainloop::GmemTiledCopyB;
static_assert(cute::size(GmemTiledCopyA{}) == cute::size(GmemTiledCopyB{}), "Number of threads in A/B tiled copies must be the same.");
static constexpr uint32_t NumLoadWarpGroups = cute::size(GmemTiledCopyA{}) / NumThreadsPerWarpGroup;
static constexpr uint32_t NumMmaWarpGroups = cute::size(TiledMma{}) / NumThreadsPerWarpGroup;
static constexpr uint32_t NumWarpGroups = NumLoadWarpGroups + NumMmaWarpGroups;
static_assert(NumWarpGroups == 2 || NumWarpGroups == 3, "Number of warp groups must be 2 or 3 for good performance.");
static constexpr uint32_t MaxThreadsPerBlock = NumWarpGroups * NumThreadsPerWarpGroup;
static constexpr uint32_t MinBlocksPerMultiprocessor = 1;
// Device side arguments
struct Arguments {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopArguments mainloop{};
EpilogueArguments epilogue{};
KernelHardwareInfo hw_info{};
TileSchedulerArguments scheduler{};
};
// Kernel entry point API
struct Params {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopParams mainloop{};
EpilogueParams epilogue{};
};
//
// Methods
//
// Convert to underlying arguments. In this case, a simple copy for the aliased type.
static
Params
to_underlying_arguments(Arguments const& args, void* workspace) {
(void) workspace;
auto problem_shape = args.problem_shape;
if constexpr (detail::Has_SwapAB_v<CollectiveMainloop>) {
// swap M/N
get<0>(problem_shape) = get<1>(args.problem_shape);
get<1>(problem_shape) = get<0>(args.problem_shape);
}
return {
args.mode,
problem_shape,
CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace),
CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace)
};
}
static bool
can_implement(Arguments const& args) {
bool implementable = (args.mode == GemmUniversalMode::kGemm) or
(args.mode == GemmUniversalMode::kBatched && cute::rank(ProblemShape{}) == 4);
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n");
return implementable;
}
implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop);
implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue);
implementable &= TileScheduler::can_implement(args.scheduler);
return implementable;
}
static
size_t
get_workspace_size(Arguments const& args) {
return 0;
}
static
cutlass::Status
initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr,
CudaHostAdapter* cuda_adapter = nullptr) {
return Status::kSuccess;
}
// Computes the kernel launch grid shape based on runtime parameters
static dim3
get_grid_shape(Params const& params) {
auto cluster_shape = Shape<_1,_1,_1>{};
auto tile_shape = TileShape{};
auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{});
return TileScheduler::get_tiled_cta_shape_mnl(
problem_shape_MNKL, tile_shape, cluster_shape);
}
static dim3
get_block_shape() {
return dim3(MaxThreadsPerBlock, 1, 1);
}
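  // Worked example (tile/problem sizes are assumptions): with TileShape = 128x128x64,
  // a 1x1x1 cluster, and (M,N,K,L) = (512, 1024, 4096, 1), get_grid_shape() returns
  // ceil(512/128) x ceil(1024/128) x 1 = dim3(4, 8, 1), while get_block_shape()
  // returns dim3(NumWarpGroups * NumThreadsPerWarpGroup, 1, 1) threads per CTA.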
CUTLASS_DEVICE
void
operator()(Params const& params, char* smem_buf) {
using namespace cute;
using X = Underscore;
// Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a.
#if ! defined(__CUDA_ARCH_FEAT_SM90_ALL)
printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n");
#else
enum class WarpGroupRole {
Producer = 0,
Consumer = 1,
};
// Kernel level shared memory storage
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(smem_buf);
int thread_idx = int(threadIdx.x);
int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup;
int warp_group_idx = canonical_warp_group_idx();
CUTLASS_ASSERT(warp_group_idx < NumWarpGroups);
WarpGroupRole warp_group_role = warp_group_idx < NumLoadWarpGroups ? WarpGroupRole::Producer : WarpGroupRole::Consumer;
// Mainloop Load pipeline
using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline;
typename MainloopPipeline::Params mainloop_pipeline_params;
if (warp_group_role == WarpGroupRole::Producer) {
mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer;
}
if (warp_group_role == WarpGroupRole::Consumer) {
mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer;
}
mainloop_pipeline_params.producer_arv_count = NumLoadWarpGroups * NumThreadsPerWarpGroup;
mainloop_pipeline_params.consumer_arv_count = NumMmaWarpGroups * NumThreadsPerWarpGroup;
MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params);
// Epilogue Load pipeline
using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline;
typename EpiLoadPipeline::Params epi_load_pipeline_params;
if (warp_group_role == WarpGroupRole::Producer) {
epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer;
}
if (warp_group_role == WarpGroupRole::Consumer) {
epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer;
}
epi_load_pipeline_params.producer_arv_count = NumLoadWarpGroups * NumThreadsPerWarpGroup;
epi_load_pipeline_params.consumer_arv_count = NumMmaWarpGroups * NumThreadsPerWarpGroup;
EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params);
// Epilogue Store pipeline
using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline;
typename EpiStorePipeline::Params epi_store_pipeline_params;
epi_store_pipeline_params.always_wait = true;
EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params);
// Initialize starting pipeline states for the collectives
// Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding)
typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state;
typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state;
// For the DMA Load (producer) we start with an opposite phase
// i.e., we skip all waits since we know that the buffer is indeed empty
PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state<MainloopPipeline>();
PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state<EpiLoadPipeline>();
PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state<EpiStorePipeline>();
// Preconditions
static_assert(cute::rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
// Separate out problem shape for convenience
    // Optionally append 1s until the problem shape is rank-4, in case it is only rank-3 (MNK)
auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{});
auto M = get<0>(problem_shape_MNKL);
auto N = get<1>(problem_shape_MNKL);
auto K = get<2>(problem_shape_MNKL);
auto L = get<3>(problem_shape_MNKL);
// Represent the full tensors
Tensor mA_mkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_A), make_shape(M,K,L), params.mainloop.dA); //(m,k,l)
Tensor mB_nkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_B), make_shape(N,K,L), params.mainloop.dB); //(n,k,l)
// Get the appropriate blocks for this thread block -- potential for thread block locality
auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K)
TiledMma tiled_mma;
// Make tiled views, defer the slice
Tensor gA_mkl = local_tile(mA_mkl, blk_shape, make_coord(_,_,_), Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k,l)
Tensor gB_nkl = local_tile(mB_nkl, blk_shape, make_coord(_,_,_), Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k,l)
// Compute m_coord, n_coord, and l_coord with their post-tiled shapes
auto m_coord = idx2crd(int(blockIdx.x), shape<2>(gA_mkl));
auto n_coord = idx2crd(int(blockIdx.y), shape<2>(gB_nkl));
auto l_coord = idx2crd(int(blockIdx.z), shape<4>(gB_nkl));
auto blk_coord = make_coord(m_coord, n_coord, _, l_coord);
// Slice with m_coord and n_coord
Tensor gA = gA_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k)
Tensor gB = gB_nkl(_,_,n_coord,_,l_coord); // (BLK_N,BLK_K,k)
// Get pipeline iterators and increments from tensor shapes
auto k_tile_iter = cute::make_coord_iterator(shape<2>(gA));
auto k_tile_count = size<2>(gA);
auto c_tile_count = CollectiveEpilogue::get_load_pipe_increment(blk_shape);
auto d_tile_count = CollectiveEpilogue::get_store_pipe_increment(blk_shape);
// Wait for all threads in the thread block
__syncthreads();
// In a warp specialized kernel, collectives expose data movement and compute operations separately
CollectiveMainloop collective_mainloop;
CollectiveEpilogue collective_epilogue{params.epilogue, shared_storage.tensors.epilogue};
if (warp_group_role == WarpGroupRole::Producer) {
// Compute tile residues for predication
auto m_max_coord = M - size<0>(gA) * get<0>(blk_coord); // M - BLK_M * m_coord
auto n_max_coord = N - size<0>(gB) * get<1>(blk_coord); // N - BLK_N * n_coord
auto k_residue = K - size<1>(gA) * size<2>(gA); // K - BLK_K * k_coord_max
auto residue_mnk = make_tuple(m_max_coord, n_max_coord, k_residue);
collective_mainloop.load(
mainloop_pipeline,
mainloop_pipe_producer_state,
gA,
gB,
k_tile_iter, k_tile_count,
residue_mnk,
thread_idx,
shared_storage.tensors.mainloop
);
// Update starting mainloop pipeline state for the pipeline drain
mainloop_pipe_producer_state.advance(k_tile_count);
// Make sure mainloop consumer has been waited upon before issuing epilogue load
collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state);
if (collective_epilogue.is_producer_load_needed()) {
epi_load_pipe_producer_state =
collective_epilogue.load(
epi_load_pipeline,
epi_load_pipe_producer_state,
problem_shape_MNKL,
blk_shape,
blk_coord,
tiled_mma,
thread_idx,
shared_storage.tensors.epilogue
);
collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state);
}
}
else if (warp_group_role == WarpGroupRole::Consumer) {
Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N)
collective_mainloop.mma(
mainloop_pipeline,
mainloop_pipe_consumer_state,
accumulators,
k_tile_count,
warp_group_thread_idx,
shared_storage.tensors.mainloop,
params.mainloop
);
// Make sure the math instructions are done and free buffers before entering the epilogue
collective_mainloop.mma_tail(
mainloop_pipeline,
mainloop_pipe_consumer_state,
k_tile_count
);
// Epilogue and write to gD
collective_epilogue.store(
epi_load_pipeline,
epi_load_pipe_consumer_state,
epi_store_pipeline,
epi_store_pipe_producer_state,
problem_shape_MNKL,
blk_shape,
blk_coord,
accumulators,
tiled_mma,
warp_group_thread_idx,
shared_storage.tensors.epilogue
);
}
#endif
}
};
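///////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (the collective types below are placeholders that a
// CollectiveBuilder would normally produce; this is not part of the kernel itself):
//
//   using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
//       cute::Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue>;  // TileScheduler_ = void
//   using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
//
//   typename Gemm::Arguments args{
//       cutlass::gemm::GemmUniversalMode::kGemm,
//       {M, N, K, L}, mainloop_args, epilogue_args, hw_info};
//
//   Gemm gemm;
//   if (gemm.can_implement(args) == cutlass::Status::kSuccess) {
//     gemm.initialize(args, /*workspace=*/nullptr);
//     gemm.run(stream);
//   }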
///////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::kernel
| include/cutlass/gemm/kernel/sm90_gemm_warpspecialized.hpp/0 | {
"file_path": "include/cutlass/gemm/kernel/sm90_gemm_warpspecialized.hpp",
"repo_id": "include",
"token_count": 6814
} | 29 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for multiply-add operations
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/thread/mma.h"
#include "cutlass/functional.h"
#include "cutlass/reduction/thread/reduce.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Structure to compute the matrix product for HFMA
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Type of GEMM inner vs outer product
bool
>
struct Mma_HFMA2;
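// The trailing bool selects between the two HFMA2 strategies specialized below:
// `true` chooses an outer-product formulation (vectorized along M or N, driven by
// LayoutC), while `false` chooses an inner-product formulation (vectorized along K).
// Illustrative instantiation (normally chosen by thread::Mma further below, so the
// shape here is only an assumption):
//
//   using MmaNNN = Mma_HFMA2<gemm::GemmShape<4, 4, 2>,
//                            layout::ColumnMajor, layout::ColumnMajor,
//                            layout::ColumnMajor, /*outer product*/ true>;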
/////////////////////////////
// Specialization for NNN //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2 <
Shape_,
layout::ColumnMajor,
layout::ColumnMajor,
layout::ColumnMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kM % 2),
"Mma_HFMA2 requires the M dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
    /// Use 2x1x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<2,1,1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::ColumnMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 1> const *ptr_B = reinterpret_cast<Array<half_t, 1> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
Array<half_t, 2> tmp { ptr_D[n*Shape::kM/2 + m] };
mma(
tmp,
ptr_A[k*Shape::kM/2 + m],
ptr_B[n*Shape::kK + k],
tmp);
ptr_D[n*Shape::kM/2 + m] = tmp;
}
}
}
}
};
/////////////////////////////
// Specialization for NNT //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2<
Shape_,
layout::ColumnMajor,
layout::ColumnMajor,
layout::RowMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kN % 2),
"Mma_HFMA2 requires the N dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x2x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<1,2,1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 1> const *ptr_A = reinterpret_cast<Array<half_t, 1> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
Array<half_t, 2> tmp { ptr_D[m*Shape::kN/2 + n] };
Array<half_t, 2> tmp_B;
tmp_B[0] = ptr_B->at(2*n*Shape::kK + k);
tmp_B[1] = ptr_B->at((2*n+1)*Shape::kK + k);
mma(
tmp,
ptr_A[k*Shape::kM + m],
tmp_B,
tmp);
ptr_D[m*Shape::kN/2 + n] = tmp;
}
}
}
}
};
/////////////////////////////
// Specialization for NTN //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2 <
Shape_,
layout::ColumnMajor,
layout::RowMajor,
layout::ColumnMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kM % 2),
"Mma_HFMA2 requires the GEMM M dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
using Mma = arch::Mma<
gemm::GemmShape<2,1,1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 1> const *ptr_B = reinterpret_cast<Array<half_t, 1> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Shape::kK / Mma::Shape::kK; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM / Mma::Shape::kM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN / Mma::Shape::kN; ++n) {
Array<half_t, 2> tmp { ptr_D[m + n * Shape::kM/2] };
mma(
tmp,
ptr_A[m + k * Shape::kM/2],
ptr_B[k * Shape::kN + n],
tmp);
ptr_D[m + n * Shape::kM/2] = tmp;
}
}
}
}
};
/////////////////////////////
// Specialization for NTT //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2<
Shape_,
layout::ColumnMajor,
layout::RowMajor,
layout::RowMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kN % 2),
"Mma_HFMA2 requires the N dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x2x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<1,2,1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 1> const *ptr_A = reinterpret_cast<Array<half_t, 1> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
Array<half_t, 2> tmp { ptr_D[m*Shape::kN/2 + n] };
mma(
tmp,
ptr_A[k*Shape::kM + m],
ptr_B[k*Shape::kN/2 + n],
tmp);
ptr_D[m*Shape::kN/2 + n] = tmp;
}
}
}
}
};
/////////////////////////////
// Specialization for TNN //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2 <
Shape_,
layout::RowMajor,
layout::ColumnMajor,
layout::ColumnMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kM % 2),
"Mma_HFMA2 requires the M dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
    /// Use 2x1x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<2,1,1>,
1,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::ColumnMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 1> const *ptr_B = reinterpret_cast<Array<half_t, 1> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
Array<half_t, 2> tmp { ptr_D[n*Shape::kM/2 + m] };
Array<half_t, 2> tmp_A;
tmp_A[0] = ptr_A->at(2*m*Shape::kK + k);
tmp_A[1] = ptr_A->at((2*m+1)*Shape::kK + k);
mma(
tmp,
tmp_A,
ptr_B[n*Shape::kK + k],
tmp);
ptr_D[n*Shape::kM/2 + m] = tmp;
}
}
}
}
};
/////////////////////////////
// Specialization for TNT //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2 <
Shape_,
layout::RowMajor,
layout::ColumnMajor,
layout::RowMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kN % 2),
"Mma_HFMA2 requires the N dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x2x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<1,2,1>,
1,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 1> const *ptr_A = reinterpret_cast<Array<half_t, 1> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
Array<half_t, 2> tmp { ptr_D[m*Shape::kN/2 + n] };
Array<half_t, 2> tmp_B;
tmp_B[0] = ptr_B->at(2*n*Shape::kK + k);
tmp_B[1] = ptr_B->at((2*n+1)*Shape::kK + k);
mma(
tmp,
ptr_A[m*Shape::kK + k],
tmp_B,
tmp);
ptr_D[m*Shape::kN/2 + n] = tmp;
}
}
}
}
};
/////////////////////////////
// Specialization for TTN //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2 <
Shape_,
layout::RowMajor,
layout::RowMajor,
layout::ColumnMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kM % 2),
"Mma_HFMA2 requires the M dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
    /// Use 2x1x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<2,1,1>,
1,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 1> const *ptr_B = reinterpret_cast<Array<half_t, 1> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
Array<half_t, 2> tmp { ptr_D[n*Shape::kM/2 + m] };
Array<half_t, 2> tmp_A;
tmp_A[0] = ptr_A->at(2*m*Shape::kK + k);
tmp_A[1] = ptr_A->at((2*m+1)*Shape::kK + k);
mma(
tmp,
tmp_A,
ptr_B[k*Shape::kN + n],
tmp);
ptr_D[n*Shape::kM/2 + m] = tmp;
}
}
}
}
};
/////////////////////////////
// Specialization for TTT //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2<
Shape_,
layout::RowMajor,
layout::RowMajor,
layout::RowMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kN % 2),
"Mma_HFMA2 requires the N dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x2x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<1,2,1>,
1,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 1> const *ptr_A = reinterpret_cast<Array<half_t, 1> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
Array<half_t, 2> tmp { ptr_D[m*Shape::kN/2 + n] };
mma(
tmp,
ptr_A[m*Shape::kK + k],
ptr_B[k*Shape::kN/2 + n],
tmp);
ptr_D[m*Shape::kN/2 + n] = tmp;
}
}
}
}
};
/////////////////////////////////////////////////////////////////////
// Specialization for TNT + Inner Product or 1x1x2K + LayoutC = T //
/////////////////////////////////////////////////////////////////////
template <typename Shape_, typename LayoutA, typename LayoutB>
struct Mma_HFMA2<
Shape_,
LayoutA,
LayoutB,
layout::RowMajor,
false
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kK % 2),
"Mma_HFMA2 requires the K dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x1x2 HFMA2 sequence for bulk of computation
using GemmShape = gemm::GemmShape<1,1,2>;
Array<half_t, 1> *ptr_D = reinterpret_cast<Array<half_t, 1> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
// Inner product is calculated using MACs, followed by final reduction
multiply_add<Array<half_t, 2>> mac;
cutlass::reduction::thread::Reduce< plus<half_t>, Array<half_t, 2> > reduce;
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / GemmShape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / GemmShape::kM; m++){
Array<half_t, 2> tmp_C;
tmp_C.clear();
Array<half_t, 1> *ptr_tmp_C = reinterpret_cast<Array<half_t, 1> *>(&tmp_C);
        ptr_tmp_C[0] = ptr_D[m*Shape::kN + n];
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / GemmShape::kK; k++){
tmp_C = mac(ptr_A[m*Shape::kK/2 + k], ptr_B[n*Shape::kK/2 + k], tmp_C);
}
Array<half_t, 1> res;
Array<half_t, 1> *ptr_res = &res;
res = reduce(tmp_C);
ptr_D[m*Shape::kN + n] = ptr_res[0];
}
}
}
};
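// Worked sketch of the inner-product path above (the shape is an assumption): for
// Shape = GemmShape<1, 1, 8>, each (m, n) output performs four half2 MACs over K
// (tmp_C = mac(ptr_A[k], ptr_B[k], tmp_C) for k = 0..3), and the final
// Reduce<plus<half_t>, Array<half_t, 2>> collapses the two lanes of tmp_C into the
// single half_t that is written back to D.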
/////////////////////////////////////////////////////////////////////
// Specialization for TNN + Inner Product or 1x1x2K + LayoutC = N //
/////////////////////////////////////////////////////////////////////
template <typename Shape_, typename LayoutA, typename LayoutB>
struct Mma_HFMA2<
Shape_,
LayoutA,
LayoutB,
layout::ColumnMajor,
false
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kK % 2),
"Mma_HFMA2 requires the K dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x1x2 HFMA2 sequence for bulk of computation
    using GemmShape = gemm::GemmShape<1,1,2>;
Array<half_t, 1> *ptr_D = reinterpret_cast<Array<half_t, 1> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
// Inner product is calculated using MACs, followed by final reduction
multiply_add<Array<half_t, 2>> mac;
cutlass::reduction::thread::Reduce< plus<half_t>, Array<half_t, 2> > reduce;
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / GemmShape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / GemmShape::kM; m++){
Array<half_t, 2> tmp_C;
tmp_C.clear();
Array<half_t, 1> *ptr_tmp_C = reinterpret_cast<Array<half_t, 1> *>(&tmp_C);
ptr_tmp_C[0] = ptr_D[n*Shape::kM + m];
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / GemmShape::kK; k++){
tmp_C = mac(ptr_A[m*Shape::kK/2 + k], ptr_B[n*Shape::kK/2 + k], tmp_C);
}
Array<half_t, 1> res;
Array<half_t, 1> *ptr_res = &res;
res = reduce(tmp_C);
ptr_D[n*Shape::kM + m] = ptr_res[0];
}
}
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_, typename LayoutA, typename LayoutB, typename LayoutC
>
struct Mma<
Shape_,
half_t,
LayoutA,
half_t,
LayoutB,
half_t,
LayoutC,
arch::OpMultiplyAdd
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = half_t;
/// Data type of operand B
using ElementB = half_t;
/// Element type of operand C
using ElementC = half_t;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMK>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kKN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
static bool const a_row_major = platform::is_same< LayoutA, layout::RowMajor>::value;
static bool const b_column_major = platform::is_same< LayoutB, layout::ColumnMajor>::value;
static bool const c_row_major = platform::is_same< LayoutC, layout::RowMajor>::value;
static bool const c_column_major = platform::is_same< LayoutC, layout::ColumnMajor>::value;
static bool const m_mod2 = !(Shape::kM % 2);
static bool const n_mod2 = !(Shape::kN % 2);
static bool const k_mod2 = !(Shape::kK % 2);
  // HFMA-based MMA optimizations come in two flavors:
  //   1. Outer product - selected from LayoutC (plus an even M or N), vectorizing along M or N
  //   2. Inner product - selected from LayoutA/LayoutB (TN) or a 1x1xK shape (plus an even K), vectorizing along K
  // If neither applies, fall back to the generic MMA.
static bool const use_outer_prod = (c_column_major && m_mod2) || (c_row_major && n_mod2);
static bool const use_inner_prod = (a_row_major && b_column_major && k_mod2) || (Shape::kM==1 && Shape::kN==1 && k_mod2);
static bool const use_optimized = (use_outer_prod || use_inner_prod);
using ArchMmaOperator = typename platform::conditional< use_optimized,
detail::Mma_HFMA2<Shape, LayoutA, LayoutB, LayoutC, use_outer_prod>,
MmaGeneric <Shape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, Operator>
>::type;
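  // Example of the selection above (shapes/layouts are assumptions): an 8x8x4 tile
  // with LayoutA = RowMajor, LayoutB = ColumnMajor, LayoutC = ColumnMajor has
  // c_column_major && m_mod2, so use_outer_prod is true and ArchMmaOperator resolves
  // to detail::Mma_HFMA2<..., true>. A 1x1x2 tile with the same layouts has odd M and
  // N, so only use_inner_prod holds and detail::Mma_HFMA2<..., false> is selected.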
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
ArchMmaOperator mma;
mma(D, A, B, C);
}
};
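// Illustrative usage of the dispatcher above (operand values are assumptions):
//
//   using ThreadMma = cutlass::gemm::thread::Mma<
//       cutlass::gemm::GemmShape<8, 8, 4>,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   ThreadMma mma;
//   typename ThreadMma::FragmentA a;   // Array<half_t, 8 * 4>
//   typename ThreadMma::FragmentB b;   // Array<half_t, 4 * 8>
//   typename ThreadMma::FragmentC c, d;
//   mma(d, a, b, c);                   // d = a * b + c, dispatched to an HFMA2 path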
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Determines whether to enable thread::Gemm<> specializations compatible with SM50
template <
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB>
struct EnableMma_Crow_SM60 {
static bool const kIsConventionalLayout =
(platform::is_same<LayoutA, layout::RowMajor>::value ||
platform::is_same<LayoutA, layout::ColumnMajor>::value) &&
(platform::is_same<LayoutB, layout::RowMajor>::value ||
platform::is_same<LayoutB, layout::ColumnMajor>::value);
static bool const value = kIsConventionalLayout;
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes matrix product when C is row-major
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
typename LayoutA_,
typename LayoutB_
>
struct Mma<
Shape_,
half_t,
LayoutA_,
half_t,
LayoutB_,
half_t,
layout::RowMajor,
arch::OpMultiplyAdd,
typename platform::enable_if<detail::EnableMma_Crow_SM60<
LayoutA_,
LayoutB_
>::value>::type>{
using Shape = Shape_;
using ElementA = half_t;
using LayoutA = LayoutA_;
using ElementB = half_t;
using LayoutB = LayoutB_;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using Operator = arch::OpMultiplyAdd;
using TransposeMma = Mma<
GemmShapeTranspose<Shape>,
half_t,
typename layout::LayoutTranspose<LayoutB>::type,
half_t,
typename layout::LayoutTranspose<LayoutA>::type,
half_t,
layout::ColumnMajor,
arch::OpMultiplyAdd,
bool>;
using FragmentA = Array<ElementA, Shape::kMK>;
using FragmentB = Array<ElementB, Shape::kKN>;
using FragmentC = Array<ElementC, Shape::kMN>;
using ArchMmaOperator = typename TransposeMma::ArchMmaOperator;
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
TransposeMma mma;
mma(D, B, A, C);
}
};
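// Note on the specialization above: a row-major-C product D = A * B is realized as the
// transposed, column-major-C problem D^T = B^T * A^T, hence the swapped operands in
// mma(D, B, A, C) and the transposed shape/layouts fed to TransposeMma. For example
// (the shape is illustrative), an 8x4x2 problem with row-major A, B, and C is
// dispatched as a 4x8x2 problem with column-major operands and column-major C.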
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/thread/mma_sm60.h/0 | {
"file_path": "include/cutlass/gemm/thread/mma_sm60.h",
"repo_id": "include",
"token_count": 12422
} | 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/threadblock/mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
/// Used for partial specialization
typename Enable = bool>
class MmaMultistage :
public MmaBase<Shape_, Policy_, Stages> {
public:
///< Base class
using Base = MmaBase<Shape_, Policy_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
///< Policy describing tuning details
using Policy = Policy_;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Minimum architecture is Sm80 to support cp.async
using ArchTag = arch::Sm80;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
/// Internal structure exposed for introspection.
struct Detail {
/// Number of cp.async instructions to load one stage of operand A
static int const AsyncCopyIterationsPerStageA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const AsyncCopyIterationsPerStageB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
/// Number of cp.async instructions to load on group of operand A
static int const kAccessesPerGroupA =
(AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
/// Number of cp.async instructions to load on group of operand B
static int const kAccessesPerGroupB =
(AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
// Optional staged-accumulation (e.g., tf32x3 kernels) for improved numerical
// accuracy, where each mainloop iteration first accumulates into a temporary
// set of freshly-cleared accumulators, which are subsequently added to the
// final accumulator set.
static bool const kStagedAccumulation = arch::detail::UseStagedAccumulation<Operator>::value;
};
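  // Worked example of the Detail counts (numbers are assumptions): with
  // AsyncCopyIterationsPerStageA = 8 and Base::kWarpGemmIterations = 4,
  // kAccessesPerGroupA = (8 + 4 - 1) / 4 = 2, so each of the four warp-level MMA
  // iterations in a stage issues two of that stage's eight cp.async copies for A.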
private:
// Structure encapsulating pipeline state live from one iteration to the next
struct PipeState {
using WarpLoadedFragmentA = typename Operator::FragmentA;
using WarpLoadedFragmentB = typename Operator::FragmentB;
using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
/// Temporary accumulator to facilitate staged-accumulation
FragmentC tmp_accum_;
/// Pair of A fragments used to overlap shared memory loads and math instructions
WarpLoadedFragmentA warp_loaded_frag_A_[2];
WarpTransformedFragmentA warp_transformed_frag_A_[2];
/// Pair of B fragments used to overlap shared memory loads and math instructions
WarpLoadedFragmentB warp_loaded_frag_B_[2];
WarpTransformedFragmentB warp_transformed_frag_B_[2];
};
private:
//
// Data members
//
/// Warp-level MMA operator
Operator warp_mma_;
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
/// Shared memory write stage index
int smem_write_stage_idx_;
/// Shared memory read stage index
int smem_read_stage_idx_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx),
smem_write_stage_idx_(0),
smem_read_stage_idx_(0)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
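  // Worked example of the mapping above (an assumed configuration): with
  // Base::WarpCount = {2, 2, 2} and warp_idx = 5: warp_idx_mn = 5 % 4 = 1,
  // warp_idx_k = 5 / 4 = 1, warp_idx_m = 1 % 2 = 1, warp_idx_n = 1 / 2 = 0,
  // i.e. this warp computes the (m, n) = (1, 0) warp tile and starts its A/B reads
  // one k-partition (kWarpGemmIterations k-groups) into the shared memory tile.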
/// Advance shared memory read-iterators to the next stage
CUTLASS_DEVICE
void advance_smem_read_stage()
{
++smem_read_stage_idx_;
if (smem_read_stage_idx_ == Base::kStages) {
// Wrap back around to the 'start' of the circular buffer in shared memory
this->warp_tile_iterator_A_.add_tile_offset({0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset({-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
smem_read_stage_idx_ = 0;
}
}
/// Advance global memory read-iterators and shared memory write-iterators to the stage
CUTLASS_DEVICE
void advance_smem_write_stage(
IteratorA &iterator_A,
IteratorB &iterator_B)
{
// Advance global iterators
iterator_A.add_tile_offset({0, 1});
iterator_B.add_tile_offset({1, 0});
// Advance shared iterators
smem_iterator_A_.add_tile_offset({0, 1});
smem_iterator_B_.add_tile_offset({1, 0});
// Increment shared memory write stage index
++smem_write_stage_idx_;
if (smem_write_stage_idx_ == Base::kStages) {
// Wrap back around to the 'start' of the circular buffer in shared memory
smem_iterator_A_.add_tile_offset({0, -Base::kStages});
smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx_ = 0;
}
}
CUTLASS_DEVICE
void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB &iterator_B,
int group_start_A = 0, int group_start_B = 0) {
iterator_A.set_iteration_index(group_start_A *
IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_A.get();
if (SharedMemoryClear == SharedMemoryClearOption::kZfill) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, gmem_ptr, iterator_A.valid());
} else {
cutlass::arch::cp_async<kSrcBytes, kCacheOpA>(
dst_ptr + v, gmem_ptr, iterator_A.valid());
}
++iterator_A;
}
++this->smem_iterator_A_;
}
}
iterator_B.set_iteration_index(group_start_B *
IteratorB::kAccessesPerVector);
this->smem_iterator_B_.set_iteration_index(group_start_B);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_B.get();
if (SharedMemoryClear == SharedMemoryClearOption::kZfill) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, iterator_B.valid());
} else {
cutlass::arch::cp_async<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, iterator_B.valid());
}
++iterator_B;
}
++this->smem_iterator_B_;
}
}
}
/// GEMM prologue. Bootstrap the global->shared memory pipeline by fetching
/// the global fragments needed by the first kStages-1 threadblock mainloop iterations
CUTLASS_DEVICE
void prologue(
IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory
IteratorB &iterator_B, ///< [in|out] iterator over B operand in global memory
int &gemm_k_iterations) ///< [in|out] number of threadblock mainloop iterations remaining
{
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations) {
// Disable global fetching if done with global fetch iterations
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
iterator_B.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, iterator_B.get(), iterator_B.valid());
++iterator_B;
}
++this->smem_iterator_B_;
}
// Move to the next write stage
advance_smem_write_stage(iterator_A, iterator_B);
// Defines the boundary of a stage of cp.async.
cutlass::arch::cp_async_fence();
}
// Optionally clear the remaining stages of SMEM. This is a functional requirement for
// some kernels so that all accumulator elements outside the GEMM footprint are zero.
if (SharedMemoryClear == SharedMemoryClearOption::kClearLastStage) {
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA last_smem_iterator_A(this->smem_iterator_A_);
typename IteratorA::AccessType zero_A;
zero_A.clear();
last_smem_iterator_A.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
last_smem_iterator_A.get());
*dst_ptr = zero_A;
++last_smem_iterator_A;
}
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB last_smem_iterator_B(this->smem_iterator_B_);
typename IteratorB::AccessType zero_B;
zero_B.clear();
last_smem_iterator_B.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
last_smem_iterator_B.get());
*dst_ptr = zero_B;
++last_smem_iterator_B;
}
}
}
/// Wait until we have at least one completed global fetch stage
CUTLASS_DEVICE
void gmem_wait()
{
// Wait until we have at least one committed global fetch stage. (#uncommitted = Base::kStages - 1 - #committed)
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
}
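  // Example of the wait above (the stage count is an assumption): with Base::kStages = 3,
  // cp_async_wait<1> blocks until at most one committed cp.async stage is still in
  // flight, guaranteeing the oldest of the two prologue stages has fully landed in
  // shared memory before the consumer warps read it.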
/// Perform a threadblock mainloop iteration of matrix multiply-accumulate
CUTLASS_DEVICE
void mac_loop_iter(
PipeState &pipe_state, ///< [in|out] loop-carried pipeline state
FragmentC &accum, ///< [in|out] destination accumulator tile
IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory
IteratorB &iterator_B, ///< [in|out] iterator over B operand in global memory
int &gemm_k_iterations) ///< [in|out] number of threadblock mainloop iterations remaining
{
// Unroll the warp-level MMA tiles of a threadblock's mainloop iteration
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
// Load the next warp-tile's A fragment from shared memory
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(pipe_state.warp_loaded_frag_A_[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
// Load the next warp-tile's B fragment from shared memory
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.load(pipe_state.warp_loaded_frag_B_[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_B_;
// Except for the first warp-tile, all warp-tiles convert their incoming shared memory fragments as necessary
if (warp_mma_k > 0) {
warp_mma_.transform(
pipe_state.warp_transformed_frag_A_[warp_mma_k % 2],
pipe_state.warp_transformed_frag_B_[warp_mma_k % 2],
pipe_state.warp_loaded_frag_A_[warp_mma_k % 2],
pipe_state.warp_loaded_frag_B_[warp_mma_k % 2]);
}
// Execute the current warp-tile of MMA operations
if (Detail::kStagedAccumulation) {
warp_mma_(
pipe_state.tmp_accum_,
pipe_state.warp_transformed_frag_A_[warp_mma_k % 2],
pipe_state.warp_transformed_frag_B_[warp_mma_k % 2],
pipe_state.tmp_accum_
);
if (warp_mma_k == 0) {
plus<FragmentC> plus_accum;
accum = plus_accum(accum, pipe_state.tmp_accum_);
pipe_state.tmp_accum_.clear();
}
} else {
warp_mma_(
accum,
pipe_state.warp_transformed_frag_A_[warp_mma_k % 2],
pipe_state.warp_transformed_frag_B_[warp_mma_k % 2],
accum
);
}
// Except for the last warp-tile, all warp-tiles issue their share of
// global->shared fragment copies
if (warp_mma_k < Base::kWarpGemmIterations - 1) {
int group_start_iteration_A, group_start_iteration_B;
group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA;
group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB;
copy_tiles_and_advance(
iterator_A,
iterator_B,
group_start_iteration_A,
group_start_iteration_B);
}
// The second-to-last warp-tile also:
// - performs the last warp-tile's share of global->shared fragment copies
// - moves to the next global fetch stage
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
// Performs the last warp-tile's share of global->shared fragment copies
int group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA;
int group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB;
copy_tiles_and_advance(
iterator_A,
iterator_B,
group_start_iteration_A,
group_start_iteration_B);
// Inserts a memory fence between stages of cp.async instructions.
cutlass::arch::cp_async_fence();
// Wait until we have at least one completed global fetch stage
gmem_wait();
// Move to the next global fetch stage
advance_smem_write_stage(iterator_A, iterator_B);
advance_smem_read_stage();
// Disable global fetching when done with global fetch iterations
--gemm_k_iterations;
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
}
// The last warp-tile also converts the shared memory fragments used by
// the first warp-tile of the next iteration, if necessary (so we can
// immediately start issuing MMA instructions at the top of the loop )
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
warp_mma_.transform(
pipe_state.warp_transformed_frag_A_[(warp_mma_k + 1) % 2],
pipe_state.warp_transformed_frag_B_[(warp_mma_k + 1) % 2],
pipe_state.warp_loaded_frag_A_[(warp_mma_k + 1) % 2],
pipe_state.warp_loaded_frag_B_[(warp_mma_k + 1) % 2]);
}
}
}
/// Perform the specified number of threadblock mainloop iterations of matrix
/// multiply-accumulate. Assumes prologue has been initiated.
CUTLASS_DEVICE
void gemm_iters(
int gemm_k_iterations, ///< number of threadblock mainloop iterations
FragmentC &accum, ///< [in|out] accumulator tile
IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory
IteratorB &iterator_B) ///< [in|out] iterator over B operand in global memory
{
PipeState pipe_state;
// Disable global fetching if done with global fetch iterations
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
// Load first warp-tile's A fragment from shared memory
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(pipe_state.warp_loaded_frag_A_[0]);
++this->warp_tile_iterator_A_;
// Load first warp-tile's B fragment from shared memory
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_B_.load(pipe_state.warp_loaded_frag_B_[0]);
++this->warp_tile_iterator_B_;
// Transform, if necessary, the first warp-tile's shared memory fragments
warp_mma_.transform(
pipe_state.warp_transformed_frag_A_[0],
pipe_state.warp_transformed_frag_B_[0],
pipe_state.warp_loaded_frag_A_[0],
pipe_state.warp_loaded_frag_B_[0]);
if (Detail::kStagedAccumulation) {
pipe_state.tmp_accum_.clear();
}
// Mainloop
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
mac_loop_iter(
pipe_state,
accum,
iterator_A,
iterator_B,
gemm_k_iterations);
}
if (Detail::kStagedAccumulation) {
plus<FragmentC> plus_accum;
accum = plus_accum(accum, pipe_state.tmp_accum_);
}
    // Commit and drain all pending and predicated cp.async operations issued by the GEMM mainloop
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
/// Prepares the class for another prologue.
CUTLASS_DEVICE
void wind_down()
{
// Catch-up the smem-read iterator to the smem-write iterator (so this class can be reused for another tile's prologue)
// First, increment remaining warp tiles to get to the next full stage. (Ideally we would
// just decrement one tile, but not all iterators implement --() decrement.)
#pragma unroll
for (int warp_mma_k = 1; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k)
{
this->warp_tile_iterator_A_.set_kgroup_index(warp_mma_k);
this->warp_tile_iterator_B_.set_kgroup_index(warp_mma_k);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
}
smem_read_stage_idx_++;
// Then wrap back two full stages (one for the tile advancing we just did, and one to catch the write iterators)
static const int kStageIters = Policy::kPartitionsK * Base::kWarpGemmIterations;
if (smem_read_stage_idx_ > 1)
{
this->warp_tile_iterator_A_.add_tile_offset({0, (-2 * kStageIters)});
this->warp_tile_iterator_B_.add_tile_offset({(-2 * kStageIters), 0});
}
else
{
this->warp_tile_iterator_A_.add_tile_offset({0, ((Base::kStages - 2) * kStageIters)});
this->warp_tile_iterator_B_.add_tile_offset({((Base::kStages - 2) * kStageIters), 0});
}
smem_read_stage_idx_ = smem_write_stage_idx_;
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< number of threadblock mainloop iterations
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A,
///< iterator over B operand in global memory
IteratorB iterator_B,
///< initial value of accumulator
FragmentC const &src_accum) {
// Prologue (start fetching iterations of global fragments into shared memory)
prologue(iterator_A, iterator_B, gemm_k_iterations);
// Wait until we have at least one completed global fetch stage
gmem_wait();
// Initialize destination accumulators with source accumulators
accum = src_accum;
// Perform the MAC-iterations
gemm_iters(gemm_k_iterations, accum, iterator_A, iterator_B);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/mma_multistage.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/mma_multistage.h",
"repo_id": "include",
"token_count": 10949
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
enum class TensorFloat32Op {
k3xTF32,
k4xTF32
};
template <
/// Floating-point rounding style
FloatRoundStyle RoundBigA_,
/// Floating-point rounding style
FloatRoundStyle RoundSmallA_,
/// Floating-point rounding style
FloatRoundStyle RoundBigB_ = RoundBigA_,
/// Floating-point rounding style
FloatRoundStyle RoundSmallB_ = RoundSmallA_,
/// Precision for TensorFloat32Op
// (k3xTF32: BigxBig, BigxSmall, SmallxBig)
// (k4xTF32: BigxBig, BigxSmall, SmallxBig, SmallxSmall)
TensorFloat32Op Precision_ = TensorFloat32Op::k3xTF32
>
struct FastF32 {
static FloatRoundStyle const kRoundBigA = RoundBigA_;
static FloatRoundStyle const kRoundSmallA = RoundSmallA_;
static FloatRoundStyle const kRoundBigB = RoundBigB_;
static FloatRoundStyle const kRoundSmallB = RoundSmallB_;
static TensorFloat32Op const kPrecision = Precision_;
};
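// Illustrative sketch of the decomposition behind FastF32: a float x is split
// into two tfloat32_t values,
//   x_big   = tf32(x)                   // rounded with kRoundBig*
//   x_small = tf32(x - float(x_big))    // rounded with kRoundSmall*
// so that x ~= x_big + x_small. A product then expands as
//   x * y ~= x_big*y_big + x_big*y_small + x_small*y_big                    (k3xTF32)
//   x * y ~= x_big*y_big + x_big*y_small + x_small*y_big + x_small*y_small  (k4xTF32)
// which is why k3xTF32 issues three TF32 MMAs per F32 MMA and k4xTF32 four.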
namespace detail {
template<
int N,
FloatRoundStyle RoundBig = FloatRoundStyle::round_toward_zero,
FloatRoundStyle RoundSmall = FloatRoundStyle::round_half_ulp_truncate
>
struct ConvertAndPackAccurateF32 {
/// Rounding styles for big and small part
static FloatRoundStyle const kRoundBig = RoundBig;
static FloatRoundStyle const kRoundSmall = RoundSmall;
/// Converter type
using Converter = NumericConverterFastF32<kRoundBig, kRoundSmall>;
/// Source fragment
using SourceFragment = Array<float, N>;
/// Destination fragment
using DestinationFragment = Array<tfloat32_t, N>;
/// Converter Fragment holding two tfloat32_t elements for every float
using ConverterFragment = Array<tfloat32_t, 2>;
/// Index in fragments for the big and small parts
static int const kBigIndex = 0;
static int const kSmallIndex = 1;
CUTLASS_HOST_DEVICE
void operator()(SourceFragment const &source,
DestinationFragment &dst_big,
DestinationFragment &dst_small) {
Converter convert_;
ConverterFragment result_;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
// convert source to result fragment
result_ = convert_(source[i]);
// store converted result fragments to destination fragment
dst_big[i] = result_[kBigIndex];
dst_small[i] = result_[kSmallIndex];
}
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting Tensor Cores, emulating F32 math with multiple TF32 operations (FastF32).
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK_ = 1,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Used for partial specialization
typename Enable = bool
>
class MmaTensorOpFastF32;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for float*float+float => float using TF32 TensorOps
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK_,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor,
/// Used for partial specialization
typename Enable
>
class MmaTensorOpFastF32<
Shape_,
float, LayoutA_,
float, LayoutB_,
float, LayoutC_,
Policy_, PartitionsK_,
AccumulatorsInRowMajor, Enable> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = float;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = float;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = float;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Indicates math operator
using MathOperator = arch::OpMultiplyAddFastF32;
/// Architecture tag from underlying instruction
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Complex transform on A operand
static ComplexTransform const kTransformA = ComplexTransform::kNone;
/// Complex transform on B operand
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// Tune the F32-to-TF32 big/small conversion for float operations.
/// Different combinations of big/small rounding trade speed against accuracy.
/// Generally, round_half_ulp_truncate improves performance but hurts accuracy.
using MmaFastF32 = FastF32 <
FloatRoundStyle::round_toward_zero, // kRoundBigA
FloatRoundStyle::round_half_ulp_truncate, // kRoundSmallA
FloatRoundStyle::round_toward_zero, // kRoundBigB
FloatRoundStyle::round_half_ulp_truncate, // kRoundSmallB
TensorFloat32Op::k3xTF32 // Number of TF32 operations
>;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
kThreadCount,
kPartitionsK
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA =
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements * 2>;
/// Fragment bisecting big and small sections
using AccessTypeFragmentA =
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements>;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kRow,
kThreadCount,
kPartitionsK
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB =
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements * 2>;
/// Fragment bisecting big and small sections
using AccessTypeFragmentB =
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements>;
/// Index in fragments for the big and small parts
static int const kBigIndex = 0;
static int const kSmallIndex = 1;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC,
typename ArchMmaOperator::Shape, typename Policy::OpDelta>;
/// Storage for C tile
using FragmentC = typename IteratorC::Fragment;
/// Number of mma operations performed
using MmaIterations = MatrixShape<
(Shape::kM + ArchMmaOperator::Shape::kM - 1) / ArchMmaOperator::Shape::kM,
(Shape::kN + ArchMmaOperator::Shape::kN - 1) / ArchMmaOperator::Shape::kN
>;
public:
/// Underlying matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaTensorOpFastF32() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
TransformedFragmentA const &A,
TransformedFragmentB const &B,
FragmentC const &C
) const {
AccessTypeFragmentA const *ptr_A = reinterpret_cast<AccessTypeFragmentA const*>(&A);
AccessTypeFragmentB const *ptr_B = reinterpret_cast<AccessTypeFragmentB const*>(&B);
//
// Accumulate in place
//
D = C;
mma_operator(D, ptr_A[kSmallIndex], ptr_B[kBigIndex], D);
mma_operator(D, ptr_A[kBigIndex], ptr_B[kSmallIndex], D);
mma_operator(D, ptr_A[kBigIndex], ptr_B[kBigIndex], D);
if (MmaFastF32::kPrecision == TensorFloat32Op::k4xTF32)
mma_operator(D, ptr_A[kSmallIndex], ptr_B[kSmallIndex], D);
}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void mma_operator(
FragmentC &D,
AccessTypeFragmentA const &A,
AccessTypeFragmentB const &B,
FragmentC const &C
) const {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
MmaOperandA const *ptr_A = reinterpret_cast<MmaOperandA const *>(&A);
MmaOperandB const *ptr_B = reinterpret_cast<MmaOperandB const *>(&B);
MmaOperandC *ptr_D = reinterpret_cast<MmaOperandC *>(&D);
// Serpentine visitation order maximizing reuse of Ra
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// This allows reuse of Rb at the serpentine turns
int n_serpentine = ((m % 2) ? (MmaIterations::kColumn - 1 - n) : n);
if (AccumulatorsInRowMajor) { // matrix B is reordered
mma(
ptr_D[n_serpentine + m * MmaIterations::kColumn],
ptr_A[m],
ptr_B[n_serpentine],
ptr_D[n_serpentine + m * MmaIterations::kColumn]);
} else {
mma(
ptr_D[m + n_serpentine * MmaIterations::kRow],
ptr_A[m],
ptr_B[n_serpentine],
ptr_D[m + n_serpentine * MmaIterations::kRow]);
}
} // end n loop
} // end m loop
#else
assert(0);
#endif
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
//
// Define conversions from source type to instruction type
//
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
detail::ConvertAndPackAccurateF32<
FragmentA::kElements / 2,
MmaFastF32::kRoundBigA,
MmaFastF32::kRoundSmallA> convert_A;
detail::ConvertAndPackAccurateF32<
FragmentB::kElements,
MmaFastF32::kRoundBigB,
MmaFastF32::kRoundSmallB> convert_B;
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements> *ptr_dst_B =
reinterpret_cast<Array<typename ArchMmaOperator::ElementB, FragmentB::kElements> *>(&dst_B);
convert_B(B, ptr_dst_B[0], ptr_dst_B[1]);
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements / 2> *ptr_dst_A =
reinterpret_cast<Array<typename ArchMmaOperator::ElementA, FragmentA::kElements / 2> *>(&dst_A);
Array<ElementA, FragmentA::kElements / 2> const *ptr_A =
reinterpret_cast<Array<ElementA, FragmentA::kElements / 2> const *>(&A);
convert_A(ptr_A[0], ptr_dst_A[0], ptr_dst_A[2]);
convert_A(ptr_A[1], ptr_dst_A[1], ptr_dst_A[3]);
#else
assert(0);
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_tensor_op_fast_f32.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_tensor_op_fast_f32.h",
"repo_id": "include",
"token_count": 5508
} | 32 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a canonical coordinate for rank=2 matrices offering named indices.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// MatrixCoord wraps Coord<2, int> to provide a helper for accessing named dimensions. Classes
/// expecting a coordinate in the rank=2 index space of a matrix should use MatrixCoord.
struct MatrixCoord : public Coord<2, int> {
public:
/// Integer-valued index
using Index = int;
/// Base type is a Coord of rank=2
using Base = Coord<2, Index>;
/// LongIndex type
using LongIndex = typename Base::LongIndex;
private:
/// Rows dimension
static int const kRow = 0;
/// Columns dimension
static int const kColumn = 1;
public:
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
MatrixCoord() { }
/// Constructs from Coord<2>
CUTLASS_HOST_DEVICE
MatrixCoord(Coord<2, Index> const &coord): Base(coord) { }
/// Helper to construct from a row and column
CUTLASS_HOST_DEVICE
MatrixCoord(Index row, Index column): Base(make_Coord(row, column)) { }
/// Helper to construct from a row and column, which are LongIndex based
CUTLASS_HOST_DEVICE
MatrixCoord(LongIndex row, LongIndex column): Base(make_Coord(Index(row), Index(column))) { }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index const & row() const { return this->at(kRow); }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index & row() { return this->at(kRow); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index const & column() const { return this->at(kColumn); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index & column() { return this->at(kColumn); }
//
// Coord operators
//
/// Element-wise addition
CUTLASS_HOST_DEVICE
MatrixCoord operator+(Base const& b) const {
return MatrixCoord(Base::operator+(b));
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
MatrixCoord operator-(Base const& b) const {
return MatrixCoord(Base::operator-(b));
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
MatrixCoord operator*(Base const& b) const {
return MatrixCoord(Base::operator*(b));
}
/// Element-wise division
CUTLASS_HOST_DEVICE
MatrixCoord operator/(Base const& b) const {
return MatrixCoord(Base::operator/(b));
}
/// In-place addition
CUTLASS_HOST_DEVICE
MatrixCoord& operator+=(Base const& b) {
Base::operator+=(b);
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
MatrixCoord& operator-=(Base const& b) {
Base::operator-=(b);
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
MatrixCoord& operator*=(Base const& b) {
Base::operator*=(b);
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
MatrixCoord& operator/=(Base const& b) {
Base::operator/=(b);
return *this;
}
};
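// Example usage (illustrative):
//
//   MatrixCoord extent(128, 64);              // 128 rows, 64 columns
//   MatrixCoord offset(16, 8);
//   MatrixCoord remaining = extent - offset;  // element-wise: (112, 56)
//   int rows_left = remaining.row();          // 112
//   int cols_left = remaining.column();       // 56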
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| include/cutlass/matrix_coord.h/0 | {
"file_path": "include/cutlass/matrix_coord.h",
"repo_id": "include",
"token_count": 1488
} | 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a final reduction for softmax
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/arch/memory.h"
#include "cutlass/arch/memory_sm75.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace kernel {
template <
typename ElementNorm_,
typename ElementSum_,
typename ElementSoftmaxCompute_,
typename ThreadblockShape_,
bool GroupedProblem = false
>
class ApplySoftmaxFinalReduction {
public:
using ElementNorm = ElementNorm_;
using ElementSum = ElementSum_;
using ElementSoftmaxCompute = ElementSoftmaxCompute_;
using ThreadblockShape = ThreadblockShape_;
static const bool isGroupedProblem = GroupedProblem;
//
// Arguments
//
struct Arguments {
cutlass::gemm::GemmCoord* problem_sizes{nullptr};
cutlass::gemm::GemmCoord problem_size{};
ElementNorm* block_Norm{nullptr};
ElementSum* block_Sum{nullptr};
int64_t* offset_Norm_Device{nullptr};
int64_t* offset_Sum_Device{nullptr};
int64_t batch_stride_Max{0};
int64_t batch_stride_Sum{0};
//
// Methods
//
Arguments() { }
// Non-grouped constructor without batching
Arguments(
cutlass::gemm::GemmCoord problem_size,
ElementNorm* block_Norm,
ElementSum* block_Sum
):
problem_size(problem_size),
block_Norm(block_Norm),
block_Sum(block_Sum),
problem_sizes(nullptr),
offset_Norm_Device(nullptr),
offset_Sum_Device(nullptr),
batch_stride_Max(0),
batch_stride_Sum(0)
{
}
// Non-grouped constructor with batching
Arguments(
cutlass::gemm::GemmCoord problem_size,
ElementNorm* block_Norm,
ElementSum* block_Sum,
int64_t batch_stride_Max,
int64_t batch_stride_Sum
):
problem_size(problem_size),
block_Norm(block_Norm),
block_Sum(block_Sum),
batch_stride_Max(batch_stride_Max),
batch_stride_Sum(batch_stride_Sum),
problem_sizes(nullptr),
offset_Norm_Device(nullptr),
offset_Sum_Device(nullptr)
{
}
// Grouped constructor
Arguments(
cutlass::gemm::GemmCoord *problem_sizes,
ElementNorm* block_Norm,
ElementSum* block_Sum,
int64_t* offset_Norm_Device,
int64_t* offset_Sum_Device
):
problem_sizes(problem_sizes),
problem_size(cutlass::gemm::GemmCoord(0, 0, 0)),
block_Norm(block_Norm),
block_Sum(block_Sum),
offset_Norm_Device(offset_Norm_Device),
offset_Sum_Device(offset_Sum_Device)
{
}
};
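// Illustrative construction of the non-batched Arguments (names are examples):
//
//   cutlass::gemm::GemmCoord problem(m, n, k);
//   Arguments args(problem, ptr_partial_max, ptr_partial_sum);
//
// where ptr_partial_max / ptr_partial_sum point to the per-threadblock partial
// norm (max) and sum buffers that this kernel reduces in place.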
struct SharedStorage {
};
//
// Params struct
//
struct Params {
Arguments args;
//
// Methods
//
Params() { }
Params(Arguments const &args_): args(args_) { }
};
private:
public:
CUTLASS_DEVICE
ApplySoftmaxFinalReduction() { }
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
apply(params, shared_storage);
}
private:
/// Full reduction
CUTLASS_DEVICE
void apply(Params const ¶ms, SharedStorage &shared_storage) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int bdim = blockDim.x;
int block_batch = blockIdx.z;
// Define the problem size, loop extent, and access offset for both the grouped and non-grouped cases
cutlass::gemm::GemmCoord problem_size = isGroupedProblem ? params.args.problem_sizes[bid] : params.args.problem_size;
int m_dim_in_loop = isGroupedProblem ? problem_size.m() : tid + bdim;
int access_offset = isGroupedProblem ? 0 : bid * bdim;
if (!isGroupedProblem && access_offset + tid >= problem_size.m()) return;
ElementNorm *curr_ptr_Max = isGroupedProblem ? \
params.args.block_Norm + params.args.offset_Norm_Device[bid] : \
params.args.block_Norm + block_batch * params.args.batch_stride_Max;
ElementSum *curr_ptr_Sum = isGroupedProblem ? \
params.args.block_Sum + params.args.offset_Sum_Device[bid] : \
params.args.block_Sum + block_batch * params.args.batch_stride_Sum;
int threadblock_num = (problem_size.n() + ThreadblockShape::kN - 1) / ThreadblockShape::kN;
using ConvertSumOutput = cutlass::NumericConverter<ElementSum, ElementSoftmaxCompute>;
using ConvertNormOutput = cutlass::NumericConverter<ElementNorm, ElementSoftmaxCompute>;
using ConvertSum = cutlass::NumericConverter<ElementSoftmaxCompute, ElementSum>;
using ConvertNorm = cutlass::NumericConverter<ElementSoftmaxCompute, ElementNorm>;
ConvertSum convert_sum;
ConvertNorm convert_norm;
ConvertSumOutput convert_sum_output;
ConvertNormOutput convert_norm_output;
uint32_t float_max_bits = 0xff7fffff;
float min_float = reinterpret_cast<float const &>(float_max_bits);
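// 0xff7fffff is the bit pattern of -FLT_MAX, used as the identity for the max
// reduction. Each threadblock column j previously wrote a partial row maximum
// N_j and a partial sum S_j (of exp(x_i - N_j)) for its slice of the row. The
// loop below combines them into full-row statistics,
//   N = max_j N_j,   S = sum_j S_j * exp(N_j - N),
// and overwrites the first partial entries with N and 1/S.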
CUTLASS_PRAGMA_UNROLL
for (int idx_m = tid; idx_m < m_dim_in_loop; idx_m += bdim) {
ElementNorm *access_n = curr_ptr_Max + idx_m + access_offset;
ElementSum *access_s = curr_ptr_Sum + idx_m + access_offset;
ElementNorm *access_n_bak = access_n;
ElementSum *access_s_bak = access_s;
ElementSoftmaxCompute max_val = ElementSoftmaxCompute(min_float);
ElementSoftmaxCompute sum_val = ElementSoftmaxCompute(0);
ElementNorm fetch_n;
ElementSum fetch_s;
CUTLASS_PRAGMA_UNROLL
for (int idx_n = 0; idx_n < threadblock_num; idx_n++) {
cutlass::arch::global_load<ElementNorm, sizeof(ElementNorm)>(fetch_n, access_n, true);
max_val = cutlass::fast_max(max_val, convert_norm(fetch_n));
access_n += problem_size.m();
}
access_n = access_n_bak;
CUTLASS_PRAGMA_UNROLL
for (int idx_n = 0; idx_n < threadblock_num; idx_n++) {
cutlass::arch::global_load<ElementNorm, sizeof(ElementNorm)>(fetch_n, access_n, true);
cutlass::arch::global_load<ElementSum, sizeof(ElementSum)>(fetch_s, access_s, true);
sum_val += convert_sum(fetch_s) * cutlass::fast_exp(convert_norm(fetch_n) - max_val);
access_n += problem_size.m();
access_s += problem_size.m();
}
ElementSoftmaxCompute inv_sum = cutlass::constants::one<ElementSoftmaxCompute>() / sum_val;
access_n = access_n_bak;
access_s = access_s_bak;
access_n[0] = convert_norm_output(max_val);
access_s[0] = convert_sum_output(inv_sum);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace reduction
} // namespace cutlass
| include/cutlass/reduction/kernel/reduce_softmax_final.h/0 | {
"file_path": "include/cutlass/reduction/kernel/reduce_softmax_final.h",
"repo_id": "include",
"token_count": 3398
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a matrix object intended for storing data in registers and operations within
a CUDA thread.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/matrix_coord.h"
namespace cutlass {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Per-thread matrix object storing a packed matrix
template <
typename Element_,
int Rows,
int Columns,
typename Layout_ = layout::RowMajor
>
class Matrix : public Array<Element_, Rows * Columns> {
public:
// Verify layout refers to a rank=2 matrix.
static_assert(
Layout_::kRank == 2,
"Layout type must refer to a rank=2 matrix");
/// Base type
using Base = Array<Element_, Rows * Columns>;
/// Element type
using Element = Element_;
/// Number of rows
static int const kRows = Rows;
/// Number of columns
static int const kColumns = Columns;
/// Layout within the array
using Layout = Layout_;
/// Reference type to an element
using Reference = Element &;
/// Logical rank of tensor index space
static int const kRank = 2;
/// Index type
using Index = typename Layout::Index;
/// Long index used for pointer offsets
using LongIndex = typename Layout::LongIndex;
/// Coordinate in logical tensor space
using TensorCoord = typename Layout::TensorCoord;
/// Stride type
using Stride = typename Layout::Stride;
/// TensorRef to matrix object
using TensorRef = TensorRef<Element, Layout>;
/// TensorRef to constant matrix object
using ConstTensorRef = typename TensorRef::ConstTensorRef;
/// TensorView to matrix object
using TensorView = TensorView<Element, Layout>;
/// TensorView to constant matrix object
using ConstTensorView = typename TensorView::ConstTensorView;
/// Diagonal vector
using Diagonal = Vector<Element, __NV_STD_MIN(kRows, kColumns)>;
private:
public:
//
// Methods
//
/// Returns the size of the object
CUTLASS_HOST_DEVICE
static MatrixCoord extent() {
return make_Coord(kRows, kColumns);
}
/// Returns the layout object
CUTLASS_HOST_DEVICE
static Layout layout() {
return Layout::packed(extent());
}
/// Ctor
CUTLASS_HOST_DEVICE
Matrix() { }
/// Ctor
CUTLASS_HOST_DEVICE
Matrix(Diagonal const &diag) {
}
/// Returns a TensorRef pointing to the first element of the tensor.
CUTLASS_HOST_DEVICE
TensorRef ref() {
return TensorRef(this->data(), layout());
}
/// Returns a TensorRef pointing to the first element of the tensor.
CUTLASS_HOST_DEVICE
ConstTensorRef const_ref() const {
return ConstTensorRef(this->data(), layout());
}
/// Returns a TensorRef pointing to the first element of the tensor.
CUTLASS_HOST_DEVICE
TensorView view() {
return TensorView(ref(), extent());
}
/// Returns a TensorView to const data
CUTLASS_HOST_DEVICE
ConstTensorView const_view() const {
return ConstTensorView(const_ref(), extent());
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
Reference at(MatrixCoord const& coord) const {
typename Base::size_type offset_(layout().offset(coord));
return Base::at(offset_);
}
/// Returns the number of scalar elements needed to store tensor.
CUTLASS_HOST_DEVICE
LongIndex capacity() const {
return LongIndex(Base::size());
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Column vector defined as a matrix with exactly one column
template <
typename Element,
int Rows,
typename Layout = layout::ColumnMajor
>
using ColumnVector = Matrix<Element, Rows, 1, Layout>;
/// Row vector defined as a matrix with exactly one row
template <
typename Element,
int Columns,
typename Layout = layout::RowMajor
>
using RowVector = Matrix<Element, 1, Columns, Layout>;
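// Example usage (illustrative):
//
//   cutlass::thread::Matrix<float, 4, 4> m;     // 16 floats held in registers
//   m[0] = 1.0f;                                // element access via Array<>
//   cutlass::MatrixCoord extent = m.extent();   // (4, 4)
//   auto view = m.view();                       // TensorView over the storage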
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace cutlass
| include/cutlass/thread/matrix.h/0 | {
"file_path": "include/cutlass/thread/matrix.h",
"repo_id": "include",
"token_count": 1676
} | 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates calculating the address and predicates to the load of tiles
from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses and visits the last
"residue" tile first, with the objective of minimizing predicate mask updates
during steady-state operation.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileAccessIteratorTriangularMatrix
///
template <typename Shape, typename Element, typename Layout,
int AdvanceRank, typename ThreadMap,
SideMode kSideMode, FillMode kFillMode, DiagType kDiagType,
typename AccessType>
class PredicatedTileAccessIteratorTriangularMatrix;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorTriangularMatrix for pitch-linear data.
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, SideMode kSideMode, FillMode kFillMode, DiagType kDiagType, typename AccessType_>
class PredicatedTileAccessIteratorTriangularMatrix<Shape_, Element_, layout::PitchLinear,
AdvanceRank, ThreadMap_, kSideMode, kFillMode, kDiagType, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
using CompareOp = typename TrMatrixCompareOp<kFillMode, kDiagType>::Type;
static_assert( kFillMode == FillMode::kFull ||
((kFillMode == FillMode::kLower || kFillMode == FillMode::kUpper) && AccessType::kElements == 1),
"BLAS3 iterator for the triangular/symmetric matrix must use AccessType::kElements as 1");
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static int const kPredicatesPerByte = 4;
static int const kPredicatesPerWord = 4 * kPredicatesPerByte;
static int const kPredicateCount = ThreadMap::Iterations::kCount * kAccessesPerVector;
/// Number of 32b words containing predicates
static int const kPredicateByteCount =
(kPredicateCount + kPredicatesPerByte - 1) / kPredicatesPerByte;
static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4;
static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u;
static_assert(kPredicateWordCount <= 4, "Too many predicates.");
/// Predicate vector stores mask to guard accesses
using Mask = Array<uint32_t, kPredicateWordCount>;
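// Worked example of the predicate packing (4 predicates per byte, 16 per
// 32-bit word): for pred_idx = 13,
//   word_idx = 13 / 16 = 0,  byte_idx = (13 % 16) / 4 = 3,  bit_idx = 13 % 4 = 1,
// so the guard bit is bit (3 * 8 + 1) = 25 of predicates_[0].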
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend PredicatedTileAccessIteratorTriangularMatrix;
private:
/// stride of pitch-linear layout (units of Element)
StrideIndex stride_;
/// (true) pitch-linear layout is mapped to row-major matrix
/// (false) pitch-linear layout is mapped to column-major matrix
bool is_row_major_;
/// for vectorized access across the diagonal boundary guard condition is
/// checked for the element on the boundary
int access_diagonal_boundary_;
/// amount (in byte) to increment pointer to move to next access along
/// strided dimension
LongIndex inc_strided_;
/// amount (in byte) to increment pointer from last access to first access
/// of next tile
LongIndex inc_next_;
/// amount (in byte) to increment pointer from first access of current tile
/// to first access of next tile
LongIndex inc_advance_;
public:
// Default ctor
CUTLASS_HOST_DEVICE
Params(): stride_(0), inc_strided_(0), inc_next_(0), inc_advance_(0), is_row_major_(false), access_diagonal_boundary_(0) { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout, bool is_row_major, int access_diagonal_boundary) :
stride_(layout.stride(0)), is_row_major_(is_row_major), access_diagonal_boundary_(access_diagonal_boundary) {
inc_strided_ = (LongIndex(stride_) * ThreadMap::Delta::kStrided) *
sizeof_bits<Element>::value / 8;
if (kAdvanceRank) {
// advance along strided dimension
inc_advance_ =
Shape::kStrided * LongIndex(stride_) * sizeof_bits<Element>::value / 8;
} else {
// advance along contiguous dimension
inc_advance_ = Shape::kContiguous * sizeof_bits<Element>::value / 8;
}
inc_next_ = inc_advance_ - LongIndex(ThreadMap::Iterations::kStrided - 1) *
ThreadMap::Delta::kStrided * LongIndex(stride_) *
sizeof_bits<Element>::value / 8;
};
};
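// Worked example of the Params increments (assumed values: float element,
// stride = 1024, ThreadMap::Delta::kStrided = 8, ThreadMap::Iterations::kStrided = 4,
// Shape::kContiguous = 64, kAdvanceRank = 0):
//   inc_strided_ = 1024 * 8 * 4            = 32768 bytes
//   inc_advance_ = 64 * 4                  = 256 bytes
//   inc_next_    = 256 - 3 * 8 * 1024 * 4  = -98048 bytes
// i.e. advancing to the next tile steps the pointer back over the strided
// offsets accumulated during the previous tile's iterations.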
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Parameters object with precomputed internal state
Params const ¶ms_;
/// Internal pointer to first access of tile
BytePointer pointer_;
/// Guard predicates
uint32_t predicates_[kPredicateWordCount];
/// Track global memory addresses on the diagonal
/// To ignore imag part for diagonal elements of hermitian matrices
uint32_t predicates_onDiag_[kPredicateWordCount];
/// Size of tensor
TensorCoord extent_;
/// Initial offset for each thread
TensorCoord thread_offset_;
/// Iteration along vectors implied by the thread map
int iteration_vector_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
private:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0u;
predicates_onDiag_[i] = 0u;
}
CompareOp compare_op;
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) {
int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int c = access_residual / kAccessesPerVector;
int v = access_residual % kAccessesPerVector;
TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements,
s * ThreadMap::Delta::kStrided);
TensorCoord coord = thread_offset_ + iteration_coord;
bool guard;
bool onDiag = false;
guard = ((coord.strided() < extent.strided()) &&
(coord.contiguous() < extent.contiguous()));
// guard access on the wrong side of the triangular matrix diagonal
if (kFillMode == FillMode::kLower || kFillMode == FillMode::kUpper) {
coord += TensorCoord{params_.access_diagonal_boundary_, 0};
bool triangular_guard_row_major = compare_op(coord.strided(), coord.contiguous()) | !params_.is_row_major_;
bool triangular_guard_col_major = compare_op(coord.contiguous(), coord.strided()) | params_.is_row_major_;
guard = guard && triangular_guard_row_major && triangular_guard_col_major;
if (kDiagType == DiagType::kUnit) {
onDiag = (guard && coord.strided() == coord.contiguous()) ? true : false;
}
}
int pred_idx_onDiag = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s);
int word_idx_onDiag = pred_idx_onDiag / kPredicatesPerWord;
int residual_onDiag = pred_idx_onDiag % kPredicatesPerWord;
int byte_idx_onDiag = residual_onDiag / kPredicatesPerByte;
int bit_idx_onDiag = residual_onDiag % kPredicatesPerByte;
predicates_onDiag_[word_idx_onDiag] |= (unsigned(onDiag) << (byte_idx_onDiag * 8 + bit_idx_onDiag));
int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx));
}
}
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(const_cast<NonConstPointer>(pointer))),
extent_(extent) {
// Per-thread offset in logical coordinates of tensor
thread_offset_ = threadblock_offset + ThreadMap::initial_offset(thread_id);
// update internal pointers
Layout layout(params_.stride_);
add_pointer_offset(layout(thread_offset_));
compute_predicates_(extent_);
set_iteration_index(0);
}
/// Construct a PredicatedTileAccessIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id)
: PredicatedTileAccessIteratorTriangularMatrix(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided());
pointer_ += Shape::kContiguous * tile_offset.contiguous();
thread_offset_ += TensorCoord{0, Shape::kStrided * tile_offset.strided()};
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous());
pointer_ += Shape::kStrided * tile_offset.strided();
thread_offset_ += TensorCoord{Shape::kContiguous * tile_offset.contiguous(), 0};
}
compute_predicates_(extent_);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(
pointer_ +
iteration_contiguous_ * (ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value) / 8) + iteration_vector_;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
pointer_ += params_.inc_strided_;
return *this;
}
// Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
// advance to next tile
pointer_ += params_.inc_next_;
// now return to start tile - if the iterator is subsequently advanced, this
// subtraction as well as the subsequent integer addition are both elided by
// the compiler.
pointer_ -= params_.inc_advance_;
return *this;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix operator++(int) {
PredicatedTileAccessIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = enable ? 0u : predicates_[i];
}
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0xffffffff;
}
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = mask[i];
}
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
mask[i] = predicates_[i];
}
}
/// Return if the address in on the diagonal
CUTLASS_HOST_DEVICE
bool getOnDiag() {
int pred_idx =
iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
bool pred = (predicates_onDiag_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
return pred;
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
int pred_idx =
iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
return pred;
//return true;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorTriangularMatrix for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_,
SideMode kSideMode, FillMode kFillMode, DiagType kDiagType,
typename AccessType_>
class PredicatedTileAccessIteratorTriangularMatrix<Shape_, Element_, layout::ColumnMajor,
AdvanceRank, ThreadMap_, kSideMode, kFillMode, kDiagType,
AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIteratorTriangularMatrix<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap,
kSideMode, kFillMode, kDiagType, AccessType>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
static int const kAccessDiagonalBoundary =
(kFillMode == FillMode::kLower) ? (AccessType::kElements - 1) : 0;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorTriangularMatrix;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0)), false, kAccessDiagonalBoundary){};
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(),
threadblock_offset.column())) {}
/// Construct a PredicatedTileAccessIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorTriangularMatrix(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix operator++(int) {
PredicatedTileAccessIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
  /// Returns whether the address is on the diagonal
CUTLASS_HOST_DEVICE
bool getOnDiag() {
return iterator_.getOnDiag();
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorTriangularMatrix for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_,
SideMode kSideMode, FillMode kFillMode, DiagType kDiagType,
typename AccessType_>
class PredicatedTileAccessIteratorTriangularMatrix<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_,
kSideMode, kFillMode, kDiagType, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIteratorTriangularMatrix<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap,
kSideMode, kFillMode, kDiagType, AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
static int const kAccessDiagonalBoundary =
(kFillMode == FillMode::kUpper) ? (AccessType::kElements - 1) : 0;
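  // Added commentary (illustrative): kAccessDiagonalBoundary accounts for vectorized accesses
  // that straddle the diagonal. Assuming AccessType::kElements == 4, this upper-fill,
  // row-major case uses a boundary of 3 while the lower-fill case uses 0; this is the
  // complement of the kLower test in the column-major specialization above. The value is
  // forwarded to the underlying pitch-linear iterator through Params below.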
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorTriangularMatrix;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0)), true, kAccessDiagonalBoundary){};
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedTileAccessIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorTriangularMatrix(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix operator++(int) {
PredicatedTileAccessIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
  /// Returns whether the address is on the diagonal
CUTLASS_HOST_DEVICE
bool getOnDiag() {
return iterator_.getOnDiag();
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/transform/threadblock/predicated_tile_access_iterator_triangular_matrix.h/0 | {
"file_path": "include/cutlass/transform/threadblock/predicated_tile_access_iterator_triangular_matrix.h",
"repo_id": "include",
"token_count": 10731
} | 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template that wraps the vector access iterator concept to load a whole vector from
           tensors in memory. This is typically used for per-channel scale and bias in convolution kernels.
*/
#pragma once
#include "cutlass/transform/threadblock/predicated_vector_access_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename VectorAccessIterator_>
class VectorIterator {
public:
using VectorAccessIterator = VectorAccessIterator_;
using Shape = typename VectorAccessIterator::Shape;
using Element = typename VectorAccessIterator::Element;
using Layout = typename VectorAccessIterator::Layout;
using TensorCoord = typename Layout::TensorCoord;
using AccessType = typename VectorAccessIterator::AccessType;
using TensorRef = typename VectorAccessIterator::TensorRef;
using Index = typename VectorAccessIterator::Index;
using LongIndex = typename VectorAccessIterator::LongIndex;
static int const kElementsPerAccess = VectorAccessIterator::kElementsPerAccess;
static int const kRowsPerIteration = VectorAccessIterator::kRowsPerIteration;
static int const kThreads = VectorAccessIterator::kThreads;
static int const kIterations = VectorAccessIterator::kIterations;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<
Element, kElementsPerAccess * kIterations>;
private:
/// Internal state
VectorAccessIterator vector_access_iterator_;
public:
/// Constructor
CUTLASS_HOST_DEVICE
VectorIterator(
Element const *ptr,
TensorCoord extent,
int thread_idx,
int warp_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
vector_access_iterator_(ptr, extent, thread_idx, warp_idx, threadblock_offset) { }
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
VectorIterator &operator++() {
vector_access_iterator_.advance();
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
VectorIterator operator++(int) {
VectorIterator self(*this);
operator++();
return self;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
frag.clear();
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[c],
vector_access_iterator_.get() + pointer_offset,
vector_access_iterator_.valid()
);
++vector_access_iterator_;
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
vector_access_iterator_.set_iteration_index(0);
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void advance() {
vector_access_iterator_.advance();
}
};
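// Illustrative usage sketch (added commentary; "ScaleBiasIterator" is a hypothetical alias for a
// concrete VectorIterator instantiation, used from device code inside a kernel):
//
//   using ScaleBiasIterator = VectorIterator<SomePredicatedVectorAccessIterator>;
//   ScaleBiasIterator it(scale_bias_ptr, extent, thread_idx, warp_idx, tb_offset);
//   typename ScaleBiasIterator::Fragment frag;
//   it.load(frag);   // fills kElementsPerAccess * kIterations elements
//   ++it;            // advances to the next tile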
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/transform/threadblock/vector_iterator.h/0 | {
"file_path": "include/cutlass/transform/threadblock/vector_iterator.h",
"repo_id": "include",
"token_count": 1488
} | 37 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Base & visitor classes of DAGIR Nodes
"""
import ctypes
from re import sub
from cutlass_library import LayoutType
from cutlass.backend.evt.ir.layout_algorithm import _list_to_tuple, _reverse_tuple
from cutlass.backend.evt.ir.tensor import Tensor
class ImplBase:
"""
Base class for Node Implementation
"""
def __init__(self, node) -> None:
self.node = node
self.name = node.name
self.tensor = node.tensor
self._type_decl = None
self.stride_dtype = "int64_t"
@staticmethod
def match(node, problem_size: tuple):
"""
Match function used in get_underlying_impl
"""
raise NotImplementedError(f"The `match` function is not defined.")
@property
def argument_type(self):
"""
Default class for Argument Type
"""
class _Argument(ctypes.Structure):
_fields_ = []
def __init__(self, *args, **kwargs) -> None:
pass
return _Argument
@property
def name_camel(self) -> str:
"""
Return the CamelCase name.
"""
return sub(r"(_|-)+", " ", self.name).title().replace(" ", "")
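    # Illustrative example (added commentary): a node named "accum_output" or "accum-output"
    # produces the CamelCase name "AccumOutput".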
def _emit_cute_tuple(self, py_tuple):
"""
Emit the cute tuple to C++ code
"""
if isinstance(py_tuple, int):
if py_tuple in [0, 1]:
return f"cute::Int<{py_tuple}>"
else:
return f"{self.stride_dtype}"
elif isinstance(py_tuple, tuple):
decl = "cute::Stride<"
for item in py_tuple:
decl += self._emit_cute_tuple(item) + ", "
return decl[:-2] + ">"
else:
raise ValueError(f"_emit_cute_tuple only accepts tuple or int, got {type(py_tuple).__name__}")
@property
def stride_mnl(self):
"""
Typename StrideMNL
"""
stride = _list_to_tuple([self.stride[-2], self.stride[-1]] + list(_reverse_tuple(tuple(self.stride[:-2]))))
return self._emit_cute_tuple(stride)
def get_non_constant_stride(self, py_tuple):
if isinstance(py_tuple, int):
if py_tuple not in [0, 1]:
return py_tuple
else:
return None
non_constant_stride = []
for item in py_tuple:
item_out = self.get_non_constant_stride(item)
if item_out:
non_constant_stride.append(item_out)
return tuple(non_constant_stride)
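    # Illustrative example (added commentary): get_non_constant_stride((1, (8, 0), 16)) drops
    # the compile-time constants 0 and 1 and returns ((8,), 16), i.e. only the strides that
    # must be supplied at runtime.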
def get_stride_mnl(self):
"""
Get the non-zero stride mnl. This is used in argument construction
"""
stride = _list_to_tuple([self.stride[-2], self.stride[-1]] + list(_reverse_tuple(tuple(self.stride[:-2]))))
return stride
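    # Illustrative example (added commentary, assuming self.stride holds (L, M, N)-ordered
    # strides and _list_to_tuple merely converts nested lists to tuples): a row-major tensor
    # of shape (2, 2, 4) has stride (8, 4, 1), and get_stride_mnl reorders it to the (M, N, L)
    # convention, returning (4, 1, 8).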
def get_smem_size(self, *args, **kwargs):
"""
Get the shared memory size and alignment of current node
"""
return (0, 1)
class NoOpImpl(ImplBase):
"""
The NoOpImpl does nothing but forward its input to users
"""
def __init__(self, node) -> None:
super().__init__(node)
@staticmethod
def match(node, problem_size: tuple):
if node.op == "store":
            # A store node that is not an output is a no-op
return not node.is_output
class NodeBase:
"""
Base class of DAG Node
"""
def __init__(self, name: str) -> None:
self.name = name
self.underlying_impl = None
self._tensor = None
# Whether the node is disabled for emit
self.disabled = False
@property
def name_camel(self) -> str:
"""
Return the CamelCase name.
"""
return self.underlying_impl.name_camel
@property
def tensor(self) -> Tensor:
"""
Return the output tensor (concept: cutlass.backend.evt.ir.tensor)
"""
return self._tensor
@tensor.setter
def tensor(self, kwargs):
"""
Setting the tensor
"""
self._tensor = Tensor(**kwargs)
#
# Helper functions for type/shape propagation
#
def shape_propagation(self, input_node_metas):
"""
Infer shape from input nodes
General Broadcasting Rules from NumPy
When operating on two arrays, we compare their shapes element-wise.
It starts with the trailing (i.e. rightmost) dimension and works its
way left. Two dimensions are compatible when
1. they are equal
2. one of them is 1
"""
if self._tensor is not None:
return
shape = None
for src in input_node_metas:
src_shape = src.tensor.shape
if shape is None:
shape = src_shape
else:
len_difference = len(shape) - len(src_shape)
if len_difference > 0:
for _ in range(len_difference):
src_shape = [1, ] + list(src_shape)
elif len_difference < 0:
for _ in range(-len_difference):
shape = [1, ] + list(shape)
broadcasted_shape = []
# Infer broadcast shape
for shape_dim, src_dim in zip(reversed(shape), reversed(src_shape)):
if shape_dim == 1:
broadcasted_shape = [src_dim, ] + list(broadcasted_shape)
elif src_dim == 1:
broadcasted_shape = [shape_dim, ] + list(broadcasted_shape)
elif shape_dim == src_dim:
broadcasted_shape = [shape_dim, ] + list(broadcasted_shape)
else:
error_msg = "Dimension mismatch between "
for src_ in input_node_metas:
error_msg += f"{src_.name}{src_.tensor.shape}, "
error_msg = error_msg[:-2] + "."
raise RuntimeError(error_msg)
shape = tuple(broadcasted_shape)
self._tensor = Tensor(element=self.element_output, shape=shape, layout_tag=LayoutType.RowMajor)
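    # Illustrative example (added commentary): for inputs of shapes (3, 1) and (2, 3, 4), the
    # shorter shape is first left-padded to (1, 3, 1) and the broadcast result is (2, 3, 4);
    # mismatched non-unit dimensions raise a RuntimeError.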
def type_propagation(self, *args, **kwargs):
"""
Each node is associated with two data types: `element` and `element_output`.
        The `element_output` is the data type of the node's output array. The `element`
        has a node-type-specific meaning:
* Load Node: data type of tensor in gmem
* Compute Node: element compute
* Store Node: data type of tensor in gmem
This function must be overloaded in the derived classes
"""
raise NotImplementedError(f"Function `type_propagation` is not overloaded in {self.__class__.__name__}")
def broadcast_propagation(self, input_node_metas: 'list[NodeBase]'):
"""
Propagate the broadcast in the reversed topological order.
For example:
C[l, m, n] = A[m, 1] + B[l, m, n]
        After the broadcast propagation, it becomes
C[l, m, n] = A[l, m, n] + B[l, m, n]
        and each tensor will have a proper stride for accessing the underlying tensor.
"""
if self.tensor is None:
raise RuntimeError(f"The tensor of node {self.name} is unknown.")
for child in input_node_metas:
child.tensor.broadcast(self.tensor.shape)
def get_underlying_impl(self, problem_size: tuple):
"""
Get the underlying implementation of the current node.
"""
if self.tensor is None:
raise RuntimeError(f"The Layout of node {self.name} is unknown. Please call PassShapeTypePropagation first.")
for impl in self.possible_impls:
if impl.match(self, problem_size):
self.underlying_impl = impl(self)
break
if self.underlying_impl is None:
raise NotImplementedError(f"No matching op for node {self.name} with stride {self.tensor.stride}.")
#
# Visitor Nodes & Impls
#
class TopoVisitorImpl(ImplBase):
"""
Impl for topological visitor
"""
def __init__(self, node) -> None:
super().__init__(node.output_node)
self.name = node.name
self.element_output = node.output_node.element_output
class TopoVisitorNode(NodeBase):
def __init__(self, name: str, subgraph, output_node) -> None:
super().__init__(name)
self.subgraph = subgraph
self.output_node = output_node
self.op = "dag"
self.underlying_impl = TopoVisitorImpl(self)
| python/cutlass/backend/evt/ir/node.py/0 | {
"file_path": "python/cutlass/backend/evt/ir/node.py",
"repo_id": "python",
"token_count": 4480
} | 38 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from cuda import cuda
import numpy as np
from cutlass.backend.memory_manager import device_mem_alloc, todevice
from cutlass.utils.datatypes import is_cupy_tensor, is_numpy_tensor, is_torch_tensor
class NumpyFrontend:
"""
Frontend node for numpy
"""
@staticmethod
def argument(np_tensor: "np.ndarray", is_output: "bool") -> cuda.CUdeviceptr:
"""Convert the input numpy tensor to CUDA device pointer
:param np_tensor: input numpy nd array
:param is_output: whether the tensor is output
:return: CUDA device pointer
"""
# copy the data to device
if is_output:
return device_mem_alloc(np_tensor.size * np_tensor.itemsize)
else:
return todevice(np_tensor)
class TorchFrontend:
"""
Frontend node for torch
"""
@staticmethod
def argument(torch_tensor: "torch.Tensor") -> cuda.CUdeviceptr:
"""Convert the input torch tensor to CUDA device pointer
:param torch_tensor: input torch tensor
:return: CUDA device pointer
"""
# check the device of torch_tensor
if not torch_tensor.is_cuda:
torch_tensor = torch_tensor.to("cuda")
return cuda.CUdeviceptr(torch_tensor.data_ptr())
class CupyFrontend:
"""
Frontend node for cupy
"""
@staticmethod
def argument(cupy_ndarray: "cp.ndarray"):
return cuda.CUdeviceptr(int(cupy_ndarray.data.ptr))
class TensorFrontend:
"""
Universal Frontend for client-provide tensors
"""
@staticmethod
def argument(tensor, is_output=False):
if is_numpy_tensor(tensor):
return NumpyFrontend.argument(tensor, is_output)
elif is_torch_tensor(tensor):
return TorchFrontend.argument(tensor)
elif is_cupy_tensor(tensor):
return CupyFrontend.argument(tensor)
else:
raise NotImplementedError("Unknown Tensor Type")
| python/cutlass/backend/frontend.py/0 | {
"file_path": "python/cutlass/backend/frontend.py",
"repo_id": "python",
"token_count": 1312
} | 39 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for enumerating CUTLASS library kernels
"""
import argparse
import enum
from itertools import chain, product
import logging
import os.path
import shutil
import sys
import copy
from typing import Any, Dict, Optional, Sequence, Tuple
_LOGGER = logging.getLogger(__name__)
def logging_prefix(indent_level: int = 0) -> str:
"""String prefix for start of each debug log entry"""
prefix = '*** '
indent = ' '
return f"{prefix}{indent_level * indent}"
def log_debug_line(line: str, indent_level: int = 0) -> None:
"""Log one line of debug output"""
prefix = logging_prefix(indent_level)
_LOGGER.debug(prefix + line)
# Certain use cases of cutlass_library nearly always prefer to run as scripts with
# relative imports, rather than via an installed Python package. An example of this
# is using CUTLASS's CMake system to generate a library of kernels to be profiled.
# To support these use cases even when an existing installation of cutlass_library
# exists, this global flag can be set to true (via command-line arguments) to ensure
# that package-based installations are not used.
# Create a temporary argument parser to check only for the availability of the
# --disable-cutlass-package-imports argument, which controls whether package-based
# imports are disabled.
def _add_package_disablement_flag(argparser):
argparser.add_argument("--disable-cutlass-package-imports", action='store_true', required=False,
help="Disable use of cutlass_library from Python package")
_parser = argparse.ArgumentParser()
_add_package_disablement_flag(_parser)
_args, _ = _parser.parse_known_args()
# Add `CUTLASS_IGNORE_PACKAGE` to `builtins` so that it is visible for gating future
# imports without requiring importing another module. Ideally, we would just place this
# as a global variable in a module to that could be imported and checked (e.g.,
# utils.CUTLASS_IGNORE_PACKAGE). However, this raises the issue of determining
# where this module should be sourced (from the cutlass_library package or from
# a relative import), which is the problem this variable is being used to solve in the
# first place.
import builtins
builtins.CUTLASS_IGNORE_PACKAGE = _args.disable_cutlass_package_imports
try:
if CUTLASS_IGNORE_PACKAGE:
raise ImportError("Disabling attempt to import cutlass_library")
from cutlass_library.library import *
from cutlass_library.manifest import *
except ImportError:
from library import *
from manifest import *
###################################################################################################
#
def CudaToolkitVersionSatisfies(semantic_ver_string, major, minor, patch = 0):
  # if no version string is given, fall back to CUDA 11.0.132
cuda_version = [11, 0, 132]
# Update cuda_version based on parsed string
if semantic_ver_string != '':
for i, x in enumerate([int(x) for x in semantic_ver_string.split('.')]):
if i < len(cuda_version):
cuda_version[i] = x
else:
cuda_version.append(x)
return cuda_version >= [major, minor, patch]
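# Illustrative examples (added commentary): CudaToolkitVersionSatisfies('11.4', 11, 2) compares
# [11, 4, 132] >= [11, 2, 0] and returns True, whereas CudaToolkitVersionSatisfies('11.1', 11, 4)
# compares [11, 1, 132] >= [11, 4, 0] and returns False.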
###################################################################################################
###################################################################################################
#
def EpilogueAlignment(max_alignment, tile, epilogue_steps = 8):
''' Helper to compute the maximum alignment of the epilogue '''
def product(X, identity = 1):
result = identity
for item in X:
result *= item
return result
elements_per_thread = product(tile.threadblock_shape[:-1]) // product(tile.warp_count) // 32 // epilogue_steps
return min(max_alignment, elements_per_thread)
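# Worked example (added commentary): for a threadblock_shape of [128, 128, 32] with warp_count
# [2, 2, 1] and the default epilogue_steps of 8, elements_per_thread is
# (128 * 128) // 4 // 32 // 8 = 16, so EpilogueAlignment(8, tile) returns min(8, 16) = 8.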
def DefaultSwizzlingFunctor():
return SwizzlingFunctor.Identity8
# To use StreamK decomposition for basic GEMMs, set `swizzling_functor = SwizzlingFunctor.StreamK`
#
def CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, \
alignment_constraints, complex_transforms = None, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = DefaultSwizzlingFunctor()):
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none, ComplexTransform.none),]
element_a, element_b, element_c, element_epilogue = data_type
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
for complex_transform in complex_transforms:
# If alignment is a tuple or a list, then we have different alignments for A and B
alignment_a = alignment if isinstance(alignment, int) else alignment[0]
alignment_b = alignment if isinstance(alignment, int) else alignment[1]
alignment_c = min(8, alignment_a) if isinstance(alignment, int) else alignment[2]
A = TensorDescription(element_a, layout[0], alignment_a, complex_transform[0])
B = TensorDescription(element_b, layout[1], alignment_b, complex_transform[1])
C = TensorDescription(element_c, layout[2], alignment_c)
new_operation = GemmOperation(GemmKind.Universal, tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor)
manifest.append(new_operation)
operations.append(new_operation)
return operations
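# Illustrative note (added commentary): with a non-empty kernel filter the nested loops above
# enumerate the full Cartesian product, e.g. 2 layouts x 3 tile descriptions x 2 alignment
# constraints x 1 complex transform yields 2 * 3 * 2 * 1 = 12 GemmOperation instances; with an
# empty filter only the first tile description and alignment constraint are used.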
# Generates 3.0 API based GemmUniversal API kernels. Alignment constraints are folded in with layouts
def CreateGemmUniversal3xOperator(
manifest, layouts, tile_descriptions, data_types,
schedules = [[KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto]],
complex_transforms=None,
epilogue_functor=EpilogueFunctor.LinearCombination,
swizzling_functor=SwizzlingFunctor.Identity1,
tile_schedulers=[TileSchedulerType.Persistent]):
if type(data_types) is dict:
data_types = [data_types]
for s in schedules:
assert(len(s) == 2)
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none, ComplexTransform.none), ]
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0]]
combinations = product(layouts, tile_descriptions, data_types, complex_transforms, schedules, tile_schedulers)
for layout, tile_description, data_type, complex_transform, schedules, tile_scheduler in combinations:
kernel_schedule, epilogue_schedule = schedules
A = TensorDescription(
data_type["a_type"], layout[0][0], layout[0][1], complex_transform[0])
B = TensorDescription(
data_type["b_type"], layout[1][0], layout[1][1], complex_transform[1])
C = TensorDescription(data_type["c_type"], layout[2][0], layout[2][1])
D = TensorDescription(data_type["d_type"], layout[2][0], layout[2][1])
gemm_op_extra_args = {}
gemm_kind = GemmKind.Universal3x
element_compute = data_type.get("epi_type", data_type["acc_type"])
operation = GemmOperation(
gemm_kind, tile_description.minimum_compute_capability,
tile_description, A, B, C, element_compute, epilogue_functor, swizzling_functor, D,
kernel_schedule, epilogue_schedule, tile_scheduler, **gemm_op_extra_args)
manifest.append(operation)
operations.append(operation)
return operations
#
def CreateSparseGemmOperator(manifest, layouts, tile_descriptions, data_type, \
alignment_constraints, complex_transforms = None, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = SwizzlingFunctor.Identity8):
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none, ComplexTransform.none),]
element_a, element_b, element_c, element_epilogue = data_type
gemm_kinds = [GemmKind.Sparse]
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
for complex_transform in complex_transforms:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment, complex_transform[0])
B = TensorDescription(element_b, layout[1], alignment, complex_transform[1])
C = TensorDescription(element_c, layout[2], alignment_c)
new_operation = GemmOperation(GemmKind.Sparse, tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor)
manifest.append(new_operation)
operations.append(new_operation)
return operations
#
def CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, data_type, \
alignment_constraints, complex_transforms):
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none, ComplexTransform.none),]
element_a, element_b, element_c, element_epilogue = data_type
gemm_kinds = [GemmKind.PlanarComplex, GemmKind.PlanarComplexArray]
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for gemm_kind in gemm_kinds:
for layout in layouts:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
for complex_transform in complex_transforms:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment, complex_transform[0])
B = TensorDescription(element_b, layout[1], alignment, complex_transform[1])
C = TensorDescription(element_c, layout[2], alignment_c)
manifest.append(GemmOperation(gemm_kind, \
tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue))
return
#
def CreateGemmGroupedOperator(manifest, layouts, tile_descriptions, data_type, \
alignment_constraints, complex_transforms = None, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = SwizzlingFunctor.Identity8):
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none, ComplexTransform.none),]
element_a, element_b, element_c, element_epilogue = data_type
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
for complex_transform in complex_transforms:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment, complex_transform[0])
B = TensorDescription(element_b, layout[1], alignment, complex_transform[1])
C = TensorDescription(element_c, layout[2], alignment_c)
new_operation = GroupedGemmOperation(GemmKind.Grouped, tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor)
manifest.append(new_operation)
operations.append(new_operation)
return operations
#
def CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, data_type, \
alignment_constraints, blas_mode, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = SwizzlingFunctor.Identity8):
element_a, element_c, element_epilogue = data_type
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for fill_mode in fill_modes:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
          # SYRK supported layouts (RowMajor, ColumnMajor) with no conjugation
complex_transform = ComplexTransform.none
# HERK supported layouts (RowMajor + conj, ColumnMajor)
if blas_mode == BlasMode.hermitian and layout[0] == LayoutType.RowMajor:
complex_transform = ComplexTransform.conj
alignment_c = 1 # Alignment only applies to A in SYRK
A = TensorDescription(element_a, layout[0], alignment, complex_transform)
C = SymmetricTensorDescription(element_c, layout[1], fill_mode, alignment_c)
# Rank-K update
new_operation = RankKOperation(RankKKind.Universal, tile_description.minimum_compute_capability, \
tile_description, A, C, element_epilogue, epilogue_functor, swizzling_functor, blas_mode)
manifest.append(new_operation)
operations.append(new_operation)
# Rank-2K update
new_operation = Rank2KOperation(RankKKind.Universal, tile_description.minimum_compute_capability, \
tile_description, A, C, element_epilogue, epilogue_functor, swizzling_functor, blas_mode)
manifest.append(new_operation)
operations.append(new_operation)
return operations
#
def CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, data_type, \
alignment_constraints, complex_transforms = None, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = SwizzlingFunctor.Identity8):
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none),]
element_a, element_b, element_c, element_epilogue = data_type
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for side_mode in side_modes:
for fill_mode in fill_modes:
for diag_type in diag_types:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
for complex_transform in complex_transforms:
alignment_c = min(8, alignment)
A = TriangularTensorDescription(element_a, layout[0], side_mode, fill_mode, diag_type,
alignment, complex_transform)
B = TensorDescription(element_b, layout[1], alignment)
C = TensorDescription(element_c, layout[2], alignment_c)
new_operation = TrmmOperation(TrmmKind.Universal, tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor)
manifest.append(new_operation)
operations.append(new_operation)
return operations
#
def CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, data_type, \
alignment_constraints, blas_mode, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = SwizzlingFunctor.Identity8):
element_a, element_b, element_c, element_epilogue = data_type
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for side_mode in side_modes:
for fill_mode in fill_modes:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
# SYMM supported layouts (RowMajor, ColumnMajor) with no conjugation
complex_transform = ComplexTransform.none
alignment_a = 1 # No vectorized access for the triangular matrix
alignment_c = min(8, alignment)
A = SymmetricTensorDescription(element_a, layout[0], fill_mode, alignment_a, complex_transform, side_mode)
# tensor A and B have same data type and layout
B = TensorDescription(element_b, layout[0], alignment)
C = TensorDescription(element_c, layout[1], alignment_c)
# SYMM/HEMM update
new_operation = SymmOperation(SymmKind.Universal, tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor, blas_mode)
manifest.append(new_operation)
operations.append(new_operation)
# SYMM/HEMM update
new_operation = SymmOperation(SymmKind.Universal, tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor, blas_mode)
manifest.append(new_operation)
operations.append(new_operation)
return operations
###########################################################################################################
# ConvolutionOperator support variations
# ____________________________________________________________________
# ConvolutionalOperator | Analytic | Optimized
# ____________________________________________________________________
# | Fprop | (strided) | (strided)
# | Dgrad | (strided, unity*) | (strided, unity)
# | Wgrad | (strided) | (strided)
# ____________________________________________________________________
#
# Note : Operators marked (*) are supported but not generated to keep the instantiated kernel count low
###########################################################################################################
# Convolution for 2D operations
def CreateConv2dOperator(manifest, layout, tile_descriptions, data_type, alignment_constraints, \
conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4):
element_a, element_b, element_c, element_epilogue = data_type
# one exceptional case
# iterator algorithm (analytic and optimized)
iterator_algorithms = [IteratorAlgorithm.Analytic, IteratorAlgorithm.Optimized]
# by default, only generate the largest tile size, largest alignment, and optimized iterator
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
iterator_algorithms = [IteratorAlgorithm.Optimized]
operations = []
for tile in tile_descriptions:
for alignment in alignment_constraints:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment)
B = TensorDescription(element_b, layout[1], alignment)
C = TensorDescription(element_c, layout[2], alignment_c)
swizzling_functor_ = swizzling_functor
#
# Conv2d Fprop
#
if ConvKind.Fprop in conv_kinds:
# Strided support for Analytic and Optimized Fprop
for iterator_algorithm in iterator_algorithms:
new_operations = [
            # Non-grouped kernel
Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Unity, epilogue_functor, swizzling_functor_),
]
          # Instantiate grouped conv kernels
if tile.math_instruction.opcode_class == OpcodeClass.TensorOp and A.layout == LayoutType.TensorNHWC and \
tile.minimum_compute_capability >= 80:
# SingleGroup kernel
new_operations.append(Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Unity, epilogue_functor, swizzling_functor_, group_mode=GroupMode.SingleGroup))
# Analytic iterator supports MultipleGroup mode
if iterator_algorithm == IteratorAlgorithm.Analytic:
new_operations.append(Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Unity, epilogue_functor, swizzling_functor_, group_mode=GroupMode.MultipleGroup))
for new_operation in new_operations:
manifest.append(new_operation)
operations.append(new_operation)
#
# Conv2d Dgrad
#
if ConvKind.Dgrad in conv_kinds:
# Unity stride for Analytic and Optimized Dgrad
for iterator_algorithm in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Dgrad, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Unity, epilogue_functor, swizzling_functor_)
manifest.append(new_operation)
operations.append(new_operation)
# Strided support for Analytic Dgrad
# strided dgrad uses a special threadblock swizzle
# note that SwizzlingFunctor.StridedDgradHorizontal might be
# better for problem sizes with large activation channel count
swizzling_functor_strided_dgrad_ = SwizzlingFunctor.StridedDgradIdentity1
if IteratorAlgorithm.Analytic in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Dgrad, IteratorAlgorithm.Analytic, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_strided_dgrad_)
manifest.append(new_operation)
operations.append(new_operation)
# Strided support for Optimized Dgrad
if IteratorAlgorithm.Optimized in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Dgrad, IteratorAlgorithm.Optimized, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_strided_dgrad_)
manifest.append(new_operation)
operations.append(new_operation)
#
# Conv2d Wgrad
#
if ConvKind.Wgrad in conv_kinds:
# Strided support for Analytic and Optimized Wgrad
for iterator_algorithm in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Wgrad, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_)
manifest.append(new_operation)
operations.append(new_operation)
return operations
# Convolution for 2D operations specialized for few channels
def CreateConv2dFixedChannelsOperator(manifest, layout, tile_descriptions, data_type, channel_counts, \
conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4):
element_a, element_b, element_c, element_epilogue = data_type
# one exceptional case
# iterator algorithm (analytic and optimized)
iterator_algorithms = [IteratorAlgorithm.FixedChannels,]
# by default, only generate the largest tile size, largest alignment, and optimized iterator
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
channel_counts = [channel_counts[0],]
operations = []
for tile in tile_descriptions:
for channel_count in channel_counts:
alignment_c = EpilogueAlignment(channel_count, tile)
A = TensorDescription(element_a, layout[0], channel_count)
B = TensorDescription(element_b, layout[1], channel_count)
C = TensorDescription(element_c, layout[2], alignment_c)
swizzling_functor_ = swizzling_functor
#
# Conv2d Fprop
#
if ConvKind.Fprop in conv_kinds:
# Strided support for Analytic and Optimized Fprop
for iterator_algorithm in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_)
manifest.append(new_operation)
operations.append(new_operation)
return operations
# Convolution for 2D operations specialized for few channels
def CreateConv2dFewChannelsOperator(manifest, layout, tile_descriptions, data_type, channel_counts, \
conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4):
element_a, element_b, element_c, element_epilogue = data_type
# one exceptional case
# iterator algorithm (analytic and optimized)
iterator_algorithms = [IteratorAlgorithm.FewChannels,]
# by default, only generate the largest tile size, largest alignment, and optimized iterator
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
channel_counts = [channel_counts[0],]
operations = []
for tile in tile_descriptions:
for channel_count in channel_counts:
alignment_c = EpilogueAlignment(channel_count, tile)
A = TensorDescription(element_a, layout[0], channel_count)
B = TensorDescription(element_b, layout[1], channel_count)
C = TensorDescription(element_c, layout[2], alignment_c)
swizzling_functor_ = swizzling_functor
#
# Conv2d Fprop
#
if ConvKind.Fprop in conv_kinds:
# Strided support for Analytic and Optimized Fprop
for iterator_algorithm in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_)
manifest.append(new_operation)
operations.append(new_operation)
return operations
# Convolution for 3D operations
def CreateConv3dOperator(manifest, layout, tile_descriptions, data_type, alignment, \
conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], epilogue_functor = EpilogueFunctor.LinearCombination):
element_a, element_b, element_c, element_epilogue = data_type
# one exceptional case
alignment_c = min(8, alignment)
# iterator algorithm (analytic and optimized)
iterator_algorithms = [IteratorAlgorithm.Analytic, IteratorAlgorithm.Optimized]
# by default, only generate the largest tile size and optimized iterators
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
iterator_algorithms = [IteratorAlgorithm.Optimized]
operations = []
# All tile sizes for Conv3dFprop and Conv3dWgrad
for tile in tile_descriptions:
A = TensorDescription(element_a, layout, alignment)
B = TensorDescription(element_b, layout, alignment)
C = TensorDescription(element_c, layout, alignment_c)
#
# Conv3d Fprop
#
if ConvKind.Fprop in conv_kinds:
# Strided support for Analytic and Optimized Fprop
for iterator_algorithm in iterator_algorithms:
new_operation = Conv3dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided)
manifest.append(new_operation)
operations.append(new_operation)
#
# Conv3d Wgrad
#
if ConvKind.Wgrad in conv_kinds:
# Strided support for Analytic and Optimized Wgrad
for iterator_algorithm in iterator_algorithms:
new_operation = Conv3dOperation(ConvKind.Wgrad, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor)
manifest.append(new_operation)
operations.append(new_operation)
# All tile sizes for Conv3dDgrad
for tile in tile_descriptions:
A = TensorDescription(element_a, layout, alignment)
B = TensorDescription(element_b, layout, alignment)
C = TensorDescription(element_c, layout, alignment_c)
#
# Conv3d Dgrad
#
if ConvKind.Dgrad in conv_kinds:
# Unity stride for Optimized Dgrad
new_operation = Conv3dOperation(ConvKind.Dgrad, IteratorAlgorithm.Optimized, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Unity, epilogue_functor)
manifest.append(new_operation)
operations.append(new_operation)
# Strided support for Analytic Dgrad
# Conv3dDgrad has a naive strided support which does not cut down redundant MMAs
new_operation = Conv3dOperation(ConvKind.Dgrad, IteratorAlgorithm.Analytic, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor)
manifest.append(new_operation)
operations.append(new_operation)
return operations
# Convolution for Depthwise 2d conv
def CreateDepthwiseConv2dOperator(manifest, layout, tile_descriptions, data_type, alignment_constraints, \
conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4):
element_a, element_b, element_c, element_epilogue = data_type
# iterator algorithm (FixedStrideDilation, Optimized)
iterator_algorithms = [IteratorAlgorithm.FixedStrideDilation, IteratorAlgorithm.Optimized]
# by default, only generate the largest tile size, largest alignment, and optimized iterator
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
operations = []
for tile in tile_descriptions:
for alignment in alignment_constraints:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment)
B = TensorDescription(element_b, layout[1], alignment)
C = TensorDescription(element_c, layout[2], alignment_c)
swizzling_functor_ = swizzling_functor
if ConvKind.Fprop in conv_kinds:
# Strided support for Optimized and FixedStridedDilation Depthwise Conv
for iterator_algorithm in iterator_algorithms:
stride_support = StrideSupport.Strided
if iterator_algorithm == IteratorAlgorithm.FixedStrideDilation:
if tile.stride == [-1, -1] or tile.dilation == [-1,-1]:
continue
stride_support = StrideSupport.Fixed
if iterator_algorithm == IteratorAlgorithm.Optimized:
if tile.stride != [-1, -1] or tile.dilation != [-1,-1]:
continue
new_operation = Conv2dOperation(ConvKind.Fprop,
iterator_algorithm,
tile.minimum_compute_capability,
tile,
A, B, C,
element_epilogue,
stride_support,
epilogue_functor,
swizzling_functor_,
group_mode=GroupMode.Depthwise)
manifest.append(new_operation)
operations.append(new_operation)
return operations
class ConvOperation3x:
"""All parameters of a CUTLASS 3 convolution operation.
Unlike CUTLASS 2 convolutions, CUTLASS 3 convolutions do not
distinguish between 2-D and 3-D convolutions by kernel class name.
Instead, for CUTLASS 3 convolutions, the tensor layouts encode
whether the convolution is 2-D or 3-D. Thus, this class deduces
the OperationKind (either Conv2d or Conv3d) from the layouts,
rather than taking it as a constructor parameter.
"""
def __init__(self,
conv_kind: ConvKind,
tile_description: TileDescription,
A: TensorDescription,
B: TensorDescription,
C: TensorDescription,
element_compute: Optional[DataType] = None,
D: Optional[TensorDescription] = None,
kernel_schedule: KernelScheduleType = KernelScheduleType.ScheduleAuto,
epilogue_schedule: EpilogueScheduleType = EpilogueScheduleType.ScheduleAuto,
tile_scheduler: TileSchedulerType = TileSchedulerType.Default,
log_indent_level: int = 1):
log_debug_line(f'ConvOperation3x::init: conv_kind: {conv_kind}', log_indent_level)
log_indent_level = log_indent_level + 1
self.conv_kind = conv_kind
self.tile_description = tile_description
self.A = A
self.B = B
self.C = C
self.element_compute = C.element if element_compute is None else element_compute
self.kernel_schedule = kernel_schedule
self.epilogue_schedule = epilogue_schedule
self.arch = tile_description.minimum_compute_capability
self.tile_scheduler = tile_scheduler
    if D is None:
self.D = C
else:
self.D = D
self.is_3x = True
self.group_mode = GroupMode.NoneGroup # CUTLASS 3 convolutions currently aren't grouped
operation_kind = None
for layout in (A.layout, B.layout, C.layout):
assert(isinstance(layout, LayoutType))
new_operation_kind = convolution_tensor_layout_type_to_operation_kind(layout)
if operation_kind is None:
operation_kind = new_operation_kind
else: # CUTLASS 3 convolutions don't permit mixing 2-D and 3-D layouts.
assert(operation_kind == new_operation_kind)
assert(operation_kind is not None)
self.operation_kind = operation_kind
def __str__(self):
return f"ConvOperation3x: operation_kind={self.operation_kind}, conv_kind={self.conv_kind}, tile_description={self.tile_description}"
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
return self.tile_description.math_instruction.math_operation in complex_operators
def is_mixed_input(self):
return self.A.element != self.B.element
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
def short_math_name(self):
prefix = ''
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
prefix = 'g'
return prefix + DataTypeNames[self.accumulator_type()]
def is_tensor_op(self):
tensor_ops = [
OpcodeClass.TensorOp,
OpcodeClass.WmmaTensorOp
]
return self.tile_description.math_instruction.opcode_class in tensor_ops
def instruction_shape_string(self):
math_operations_map = {
MathOperation.xor_popc: 'xor',
MathOperation.and_popc: 'and'
}
if self.is_tensor_op():
is0, is1, is2 = self.tile_description.math_instruction.instruction_shape
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ''
return f"{is0}x{is1}x{is2}{math_op_string}"
else:
return ''
def intermediate_type_string(self):
'''
Name of the distinct intermediate type used by the tensor operation,
or the empty string if none.
Tensor ops (opcode_class *TensorOp) may use an intermediate data type
that differs from the element type of A or the accumulator type.
'''
if not self.is_tensor_op():
return ''
elif self.tile_description.math_instruction.element_a == self.A.element:
return ''
elif self.tile_description.math_instruction.element_a == self.tile_description.math_instruction.element_accumulator:
return ''
else:
return DataTypeNames[self.tile_description.math_instruction.element_a]
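# Note (illustrative): intermediate_type_string() is non-empty when the tensor-op
# instruction computes in a narrower type than A, e.g. A is f32 but the math
# instruction's element_a is tf32; the kernel name then carries 'tf32'.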
def core_name(self):
inst_shape = self.instruction_shape_string()
intermediate_type = self.intermediate_type_string()
conv_kind_name = ConvKindNames[self.conv_kind]
return f"{self.short_math_name()}{inst_shape}{intermediate_type}{conv_kind_name}"
def extended_name(self):
core_name = self.core_name()
element_a = DataTypeNames[self.A.element]
element_b = DataTypeNames[self.B.element]
element_acc = DataTypeNames[self.tile_description.math_instruction.element_accumulator]
element_c = DataTypeNames[self.C.element]
element_d = DataTypeNames[self.D.element]
return f"{core_name}_{element_a}_{element_b}_{element_acc}_{element_c}_{element_d}"
def layout_names(self):
'''Layout strings for A and B, respectively'''
if self.is_complex():
return (ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)],
ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)])
else:
return (ShortLayoutTypeNames[self.A.layout],
ShortLayoutTypeNames[self.B.layout])
def extended_name(self):
core_name = self.core_name()
element_a = DataTypeNames[self.A.element]
element_b = DataTypeNames[self.B.element]
element_acc = DataTypeNames[self.tile_description.math_instruction.element_accumulator]
element_c = DataTypeNames[self.C.element]
element_d = DataTypeNames[self.D.element]
layout_a, layout_b = self.layout_names()
return f"{core_name}_{element_a}{layout_a}_{element_b}{layout_b}_{element_acc}_{element_c}_{element_d}"
def configuration_name(self):
prefix = 'cutlass3x'
arch = self.arch
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
tbm = self.tile_description.tile_shape[0]
tbn = self.tile_description.tile_shape[1]
tbk = self.tile_description.tile_shape[2]
cm = self.tile_description.cluster_shape[0]
cn = self.tile_description.cluster_shape[1]
ck = self.tile_description.cluster_shape[2]
alignment = max(self.A.alignment, self.B.alignment)
tile_scheduler = TileSchedulerSuffixes[self.tile_scheduler]
kernel_schedule = KernelScheduleSuffixes[self.kernel_schedule]
epilogue_schedule = EpilogueScheduleSuffixes[self.epilogue_schedule]
return f"{prefix}_sm{arch}_{opcode_class_name}_{self.extended_name()}_{tbm}x{tbn}x{tbk}_{cm}x{cn}x{ck}_{self.tile_description.stages}_align{alignment}{tile_scheduler}{kernel_schedule}{epilogue_schedule}"
def procedural_name(self):
return self.configuration_name()
def convolution_tensor_layout_type_to_operation_kind(layout: LayoutType) -> OperationKind:
if layout == LayoutType.TensorNHWC or layout == LayoutType.TensorKCSR:
return OperationKind.Conv2d
elif layout == LayoutType.TensorNDHWC or layout == LayoutType.TensorKCSRT:
return OperationKind.Conv3d
else:
raise RuntimeError(f'LayoutType {layout} does not have a corresponding OperationKind')
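# For reference, the mapping above resolves layouts as follows:
#   TensorNHWC  or TensorKCSR  -> OperationKind.Conv2d
#   TensorNDHWC or TensorKCSRT -> OperationKind.Conv3d
# Any other LayoutType raises a RuntimeError.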
def CreateConvOperator3x(manifest: Manifest,
dims_and_alignments: Sequence[Tuple[Tuple[int, int], Tuple[int, int], Tuple[int, int]]],
tile_descriptions: Sequence[Sequence[TileDescription]],
data_types,
schedule_pairs: Sequence[Tuple[KernelScheduleType, KernelScheduleType]] = \
[(KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto)],
complex_transforms: Optional[Sequence[ComplexTransform]] = None,
tile_schedulers: Sequence[TileSchedulerType] = [TileSchedulerType.Persistent],
conv_kind: ConvKind = ConvKind.Fprop,
log_indent_level: int = 1):
"""
Create zero or more CUTLASS 3 convolution operators, one for each
feasible combination of the input parameters, and add them to the manifest.
The spatial dimensionality (2-D or 3-D) is deduced from dims_and_alignments;
an illustrative example of that argument appears in the comment just after this docstring.
dims_and_alignments: 3-level list. Each outer list term is a list [A, B, C].
Each inner list (A, B, or C) has the form [num_spatial_dimensions, alignment].
Both are integers; the first is the number of spatial dimensions
(currently, only 2 or 3 are supported), and the second is the byte alignment.
We deduce the operation_kind (either OperationKind.Conv2d or OperationKind.Conv3d)
from num_spatial_dimensions.
This function doesn't take layouts, unlike the GEMM functions.
CUTLASS 3 convolutions currently support three input layouts:
* TensorNWC for 1-D convolutions,
* TensorNHWC for 2-D convolutions, and
* TensorNDHWC for 3-D convolutions.
Output (C and D) layouts are the same as input layouts,
except for Wgrad convolutions, where the layouts are
* TensorKCS for 1-D convolutions,
* TensorKCSR for 2-D convolutions, and
* TensorKCSRT for 3-D convolutions.
The output layouts are completely constrained by the input layouts
and the convolution kind.
tile_descriptions: 2-level list.
Outer level has one list per math instruction.
Inner level has one TileDescription for each cluster shape.
data_types: Either a single data_type dictionary, or a list of them.
Keys: 'a_type', 'b_type', 'c_type', 'd_type', 'acc_type', 'epi_type'
complex_transforms: Optional list of pairs.
First element of each pair is the complex transform for A, and
second element of each pair is the complex transform for B.
schedule_pairs: [(kernel_schedule, epilogue_schedule), ...]
conv_kind: Convolution kind (Fprop, Dgrad, or Wgrad).
"""
log_debug_line('CreateConvOperator3x', log_indent_level)
log_indent_level = log_indent_level + 1
log_debug_line(f'conv_kind: {conv_kind}', log_indent_level)
for triple in dims_and_alignments:
assert(isinstance(triple, tuple) or isinstance(triple, list))
assert(len(triple) == 3)
spatial_dimensionality = None # to be determined by loop below
for entry in triple: # [A, B, C]
assert(len(entry) == 2)
[dim, alignment] = entry
assert(type(dim) is int)
assert(dim == 2 or dim == 3)
assert(type(alignment) is int)
assert(alignment > 0)
if spatial_dimensionality is None:
spatial_dimensionality = dim
else:
# A, B, and C need to have the same spatial dimensionality
assert(spatial_dimensionality == dim)
def input_and_output_layouts(spatial_dim: int, kind: ConvKind) -> Tuple[LayoutType, LayoutType]:
if spatial_dim == 1:
input_layout = LayoutType.TensorNWC
if kind == ConvKind.Wgrad:
output_layout = LayoutType.TensorKCS
else:
output_layout = input_layout
elif spatial_dim == 2:
input_layout = LayoutType.TensorNHWC
if kind == ConvKind.Wgrad:
output_layout = LayoutType.TensorKCSR
else:
output_layout = input_layout
elif spatial_dim == 3:
input_layout = LayoutType.TensorNDHWC
if kind == ConvKind.Wgrad:
output_layout = LayoutType.TensorKCSRT
else:
output_layout = input_layout
else:
assert(False)
return (input_layout, output_layout)
def dims_to_layouts(A_B_C: Tuple[Tuple[int, int], Tuple[int, int], Tuple[int, int]]) -> \
Tuple[Tuple[LayoutType, int], Tuple[LayoutType, int], Tuple[LayoutType, int]]:
[A, B, C] = A_B_C
[spatial_dim, alignment] = A
[input_layout, output_layout] = input_and_output_layouts(spatial_dim, conv_kind)
return ((input_layout, A[1]),
(input_layout, B[1]),
(output_layout, C[1]))
# layouts: list of triples (A, B, C).
# Each of A, B, and C has the form [layout, alignment].
layouts = [dims_to_layouts(A_B_C) for A_B_C in dims_and_alignments]
if type(data_types) is dict:
data_types = [data_types]
for s in schedule_pairs:
assert(len(s) == 2)
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none, ComplexTransform.none)]
# itertools.product returns a single-pass iterator, so call make_combinations()
# to build a fresh one whenever the full Cartesian product is needed.
def make_combinations():
return product(
layouts,
tile_descriptions,
data_types,
complex_transforms,
schedule_pairs,
tile_schedulers
)
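# (Illustrative) Why make_combinations() rebuilds the product each time:
#   combos = product([1, 2], ['a'])
#   list(combos)   # [(1, 'a'), (2, 'a')]
#   list(combos)   # []  -- the iterator is already exhausted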
operations = []
for layout_triple, tile_description, data_type, complex_transform_pair, schedule_pair, tile_scheduler in make_combinations():
A_layout, A_alignment = layout_triple[0]
A_xform = complex_transform_pair[0]
B_layout, B_alignment = layout_triple[1]
B_xform = complex_transform_pair[1]
C_layout, C_alignment = layout_triple[2]
D_layout = C_layout
D_alignment = C_alignment
A = TensorDescription(data_type["a_type"], A_layout, A_alignment, A_xform)
B = TensorDescription(data_type["b_type"], B_layout, B_alignment, B_xform)
C = TensorDescription(data_type["c_type"], C_layout, C_alignment)
D = TensorDescription(data_type["d_type"], D_layout, D_alignment)
element_compute = data_type.get("epi_type", data_type["acc_type"])
kernel_schedule, epilogue_schedule = schedule_pair
operation = ConvOperation3x(conv_kind=conv_kind,
tile_description=tile_description,
A=A,
B=B,
C=C,
element_compute=element_compute,
D=D,
kernel_schedule=kernel_schedule,
epilogue_schedule=epilogue_schedule,
tile_scheduler=tile_scheduler,
log_indent_level=log_indent_level)
log_debug_line(f'Created ConvOperation3x: {str(operation)}', log_indent_level)
manifest.append(operation)
operations.append(operation)
return operations
###################################################################################################
###################################################################################################
#
def GenerateSM50_Simt(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
MathInstruction( \
[1, 1, 1], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 50
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 8], 2, [1, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
if math_inst.element_a == DataType.f32:
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
#
def GenerateSM50_Simt_complex(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.Simt, \
MathOperation.multiply_add_complex),
]
min_cc = 50
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 64, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 8], 2, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32,
DataType.cf32,
DataType.cf32,
DataType.cf32,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
#
def GenerateSM50(manifest, cuda_version):
GenerateSM50_Simt(manifest, cuda_version)
GenerateSM50_Simt_complex(manifest, cuda_version)
###################################################################################################
###################################################################################################
#
def GenerateSM60_Simt(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 60
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 8], 2, [1, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
def GenerateSM60_Simt_DepthwiseConv2d(manifest, cuda_version):
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 60
max_cc = 1024
alignment_constraints = [8,]
filter_3x3 = [3, 3]
filter_5x5 = [5, 5]
# [stride_h, stride_w]
# [-1, -1] means any stride.
strides = [[-1,-1], [1, 1], [2, 2]]
# [dilation_h, dilation_w]
# [-1, -1] means any dilation.
dilations = [[-1,-1], [1, 1], [2, 2]]
# groups per thread block
g16 = 16
g32 = 32
g64 = 64
# output shape per thread block
npq_1x4x4 = [1, 4, 4]
npq_1x8x8 = [1, 8, 8]
npq_1x10x10 = [1, 10, 10]
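# (Illustrative) The threadblock output shape passed below is npq + [g], e.g.
#   npq_1x8x8 + [g32] == [1, 8, 8, 32]   # [N, P, Q, groups] per threadblock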
tile_descriptions = []
for math_inst in math_instructions:
for stride, dilation in product(strides, dilations):
tile_descriptions.extend([
# filter3x3 ThreadBlock_output, filter, stage, warp
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g32], filter_3x3, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g64], filter_3x3, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g16], filter_3x3, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x10x10+[g64], filter_3x3, 2, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g32], filter_3x3, 4, stride, dilation, [4, 1, 1], math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g64], filter_3x3, 4, stride, dilation,[4, 1, 1], math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g16], filter_3x3, 4, stride, dilation, [4, 1, 1], math_inst, min_cc, max_cc),
# filter5x5 ThreadBlock_output, filter, stage, warp
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g32], filter_5x5, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g64], filter_5x5, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g16], filter_5x5, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x10x10+[g64], filter_5x5, 2, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g32], filter_5x5, 4, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g64], filter_5x5, 4, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g16], filter_5x5, 4, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc)
])
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateDepthwiseConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
#
def GenerateSM60(manifest, cuda_version):
GenerateSM60_Simt(manifest, cuda_version)
GenerateSM60_Simt_DepthwiseConv2d(manifest, cuda_version)
###################################################################################################
###################################################################################################
#
def GenerateSM61_Simt(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 4], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 61
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 32], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 32], 2, [1, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
#
#
def GenerateSM61(manifest, cuda_version):
GenerateSM61_Simt(manifest, cuda_version)
###################################################################################################
###################################################################################################
#
def GenerateSM70_TensorOp_884(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 1):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[8, 8, 4], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[8, 8, 4], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 70
max_cc = 75
alignment_constraints = [8, 4, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints)
#
def GenerateSM70_PlanarComplexTensorOp_884(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 1):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
math_instructions = [
MathInstruction( \
[8, 8, 4], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[8, 8, 4], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 70
max_cc = 75
alignment_constraints = [8, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, complex_transforms)
#
def GenerateSM70_WmmaTensorOp_161616(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 16, 16], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.WmmaTensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 16, 16], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.WmmaTensorOp, \
MathOperation.multiply_add),
]
min_cc = 70
max_cc = 1024
alignment_constraints = [8,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
#
##################################################################################################
#
def GenerateSM70(manifest, cuda_version):
GenerateSM70_TensorOp_884(manifest, cuda_version)
GenerateSM70_PlanarComplexTensorOp_884(manifest, cuda_version)
# To limit build size, WMMA GEMMs are disabled for now.
#
#GenerateSM70_WmmaTensorOp_161616(manifest, cuda_version)
###################################################################################################
###################################################################################################
#
def GenerateSM75_TensorOp_1688_FewChannels(manifest, cuda_version, math_inst):
min_cc = 75
max_cc = 1024
tile_descriptions = [
TileDescription([128, 64, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 2, [2, 2, 2], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type, [4, 8])
CreateConv2dFewChannelsOperator(manifest, conv_layout, tile_descriptions, data_type, [1, 2, 4])
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, [4, 8])
CreateConv2dFewChannelsOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, [1, 2, 4])
#
def GenerateSM75_TensorOp_1688(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [8, 4, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 2, [1, 2, 2], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints)
# Separate generator for 'few channels' specializations
GenerateSM75_TensorOp_1688_FewChannels(manifest, cuda_version, math_inst)
#
#
def GenerateSM75_PlanarComplexTensorOp_1688(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [8, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([ 64, 128, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, complex_transforms)
#
def GenerateSM75_TensorOp_8816_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[8, 8, 16], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[8, 8, 16], \
DataType.u8, DataType.u8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 75
max_cc = 90
alignment_constraints = [16,]
alignment_constraints_small_channels = [16, 8, 4]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 64], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 32, 64], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 256, 64], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 64], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 32, 64], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 32], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 32, 32], 2, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
DataType.s32,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
DataType.f32,
]
operations = []
operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
operations += CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints_small_channels, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
operations += CreateConv2dFewChannelsOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints_small_channels, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
if op.tile_description.threadblock_shape[1] >= 128:
op.C.alignment = 16
else:
op.C.alignment = 8
#
#
def GenerateSM75_TensorOp_8816_Interleaved(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.ColumnMajorInterleaved32, LayoutType.RowMajorInterleaved32, LayoutType.ColumnMajorInterleaved32),
]
math_instructions = [
MathInstruction( \
[8, 8, 16], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[8, 8, 16], \
DataType.u8, DataType.u8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 75
max_cc = 90
alignment_constraints = [16,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 64], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
DataType.f32,
]
operations = CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
conv_layout = (LayoutType.TensorNC32HW32, LayoutType.TensorC32RSK32, LayoutType.TensorNC32HW32)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
op.C.alignment = 8
#
#
def GenerateSM75_TensorOp_8832_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[8, 8, 32], \
DataType.s4, DataType.s4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[8, 8, 32], \
DataType.u4, DataType.u4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 75
max_cc = 89
alignment_constraints = [32,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 128], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
DataType.s32,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
DataType.f32,
]
operations = []
operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
if op.tile_description.threadblock_shape[1] >= 128:
op.C.alignment = 16
elif op.tile_description.threadblock_shape[1] == 64:
op.C.alignment = 8
else:
op.C.alignment = 8
#
#
def GenerateSM75_TensorOp_8832_Interleaved(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.ColumnMajorInterleaved64, LayoutType.RowMajorInterleaved64, LayoutType.ColumnMajorInterleaved64),
]
math_instructions = [
MathInstruction( \
[8, 8, 32], \
DataType.s4, DataType.s4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[8, 8, 32], \
DataType.u4, DataType.u4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 75
max_cc = 89
alignment_constraints = [32,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 128], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
DataType.f32,
]
operations = CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
conv_layout = (LayoutType.TensorNC64HW64, LayoutType.TensorC64RSK64, LayoutType.TensorNC64HW64)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
op.C.alignment = 16
#
#
def GenerateSM75_TensorOp_88128(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[8, 8, 128], \
DataType.b1, DataType.b1, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.xor_popc),
]
min_cc = 75
max_cc = {
MathOperation.xor_popc: 89,
MathOperation.and_popc: 90
}
alignment_constraints = [128,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 512], 2, [4, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 256, 512], 2, [2, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 128, 512], 2, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 256, 512], 2, [1, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([256, 64, 512], 2, [4, 1, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 128, 512], 2, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 64, 512], 2, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 64, 512], 2, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
]
data_type = [DataType.b1, DataType.b1, DataType.s32, DataType.s32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
#
def GenerateSM75_WmmaTensorOp_161616(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 16, 16], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.WmmaTensorOp, \
MathOperation.multiply_add),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [16,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
DataType.f32,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
DataType.f32,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
#
#
def GenerateSM75_Simt_complex(manifest, cuda_version):
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.Simt, \
MathOperation.multiply_add_complex),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 8], 5, [4, 2, 1], math_inst, min_cc, max_cc)
]
data_type = [
DataType.cf32,
DataType.cf32,
DataType.cf32,
DataType.cf32
]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
def GenerateSM75(manifest, cuda_version):
GenerateSM75_TensorOp_1688(manifest, cuda_version)
GenerateSM75_PlanarComplexTensorOp_1688(manifest, cuda_version)
GenerateSM75_TensorOp_8816_TN(manifest, cuda_version)
GenerateSM75_TensorOp_8816_Interleaved(manifest, cuda_version)
GenerateSM75_TensorOp_8832_TN(manifest, cuda_version)
GenerateSM75_TensorOp_8832_Interleaved(manifest, cuda_version)
GenerateSM75_TensorOp_88128(manifest, cuda_version)
#GenerateSM75_WmmaTensorOp_161616(manifest, cuda_version)
GenerateSM75_Simt_complex(manifest, cuda_version)
###################################################################################################
###################################################################################################
#
def GenerateSM80_TensorOp_16816(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 16], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 16], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 16], \
DataType.bf16, DataType.bf16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [8, 4, 2]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 3, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
CreateGemmGroupedOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type, [4, 8])
CreateConv3dOperator(manifest, LayoutType.TensorNDHWC, tile_descriptions, data_type, 8)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints)
CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, [4, 8])
CreateConv3dOperator(manifest, LayoutType.TensorNDHWC, tile_descriptions, data_type_mixed, 8)
#
#
def GenerateSM80_SparseTensorOp_16832(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 1):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.RowMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.RowMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 32], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 32], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 32], \
DataType.bf16, DataType.bf16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [8]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([ 64, 128, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 128], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
#
#
def GenerateSM80_PlanarComplexTensorOp_16816(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
math_instructions = [
MathInstruction( \
[16, 8, 16], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 16], \
DataType.bf16, DataType.bf16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 16], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [8, ]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([ 64, 128, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, complex_transforms)
#
def GenerateSM80_TensorOp_16816_mixed_input_upcast_a(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
# Upcast on Operand A
math_instructions = [
MathInstruction( \
[16, 8, 16], \
DataType.s8, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_mixed_input_upcast),
MathInstruction( \
[16, 8, 16], \
DataType.s8, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_mixed_input_upcast),
MathInstruction( \
[16, 8, 16], \
DataType.u8, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_mixed_input_upcast),
MathInstruction( \
[16, 8, 16], \
DataType.u8, DataType.bf16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_mixed_input_upcast),
MathInstruction( \
[16, 8, 16], \
DataType.s8, DataType.bf16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_mixed_input_upcast),
]
min_cc = 80
max_cc = 1024
# For mixed-input kernels, alignment constraints are a list of lists, where each
# inner list gives the alignment constraints for the operands/matrices:
# [[alignA, alignB, alignC], ...]
alignment_constraints = [[16, 8, 8],]
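# (Illustrative) [[16, 8, 8]] means: operand A (the 8-bit operand being upcast)
# uses alignment 16, operand B (f16/bf16) uses alignment 8, and C uses alignment 8.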
for math_inst in math_instructions:
tile_descriptions = [
# 128x128
TileDescription([128, 128, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
# 128x64
TileDescription([128, 64, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
# 128x32
TileDescription([128, 32, 64], 9, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc),
# 128x16
TileDescription([128, 16, 64], 5, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 16, 64], 3, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
    # Stream-K uses more registers, which can cause spills for the largest warp tile size
    # when the accumulators are 32-bit; the Identity8 swizzling functor is used here instead.
operations = CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination, SwizzlingFunctor.Identity8)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_b,
math_inst.element_accumulator,
]
operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombination, SwizzlingFunctor.Identity8)
for op in operations:
if (DataTypeSize[op.C.element] == 16) and \
(op.tile_description.threadblock_shape[1] <= 32):
op.C.alignment = 4
#
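# Upcast on Operand B
# Mirror of the function above: 8-bit integer B (s8/u8) is widened to the 16-bit
# floating-point type of operand A (f16/bf16).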
def GenerateSM80_TensorOp_16816_mixed_input_upcast_b(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 16], \
DataType.f16, DataType.s8, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_mixed_input_upcast),
MathInstruction( \
[16, 8, 16], \
DataType.bf16, DataType.s8, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_mixed_input_upcast),
MathInstruction( \
[16, 8, 16], \
DataType.f16, DataType.u8, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_mixed_input_upcast),
MathInstruction( \
[16, 8, 16], \
DataType.bf16, DataType.u8, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_mixed_input_upcast),
]
min_cc = 80
max_cc = 1024
  # For mixed-input kernels, the alignment constraints are a list of lists, where each
  # inner list gives the alignment constraints for the operands/matrices:
  # [[alignA, alignB, alignC], ...]
alignment_constraints = [[8, 16, 8],]
for math_inst in math_instructions:
tile_descriptions = [
# 128x128
TileDescription([128, 128, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
# 128x64
TileDescription([128, 64, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
# 128x32
TileDescription([128, 32, 64], 9, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 32], 9, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
# 128x16
TileDescription([128, 16, 64], 5, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 16, 64], 3, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 16, 32], 9, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 16, 32], 5, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 16, 32], 3, [2, 1, 1], math_inst, min_cc, max_cc),
# 256x16
TileDescription([256, 16, 32], 5, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([256, 16, 32], 3, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
    # Stream-K uses more registers, which can cause spills for the largest warp tile size
    # when the accumulators are 32-bit; the Identity8 swizzling functor is used here instead.
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination, SwizzlingFunctor.Identity8)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
operations = CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombination, SwizzlingFunctor.Identity8)
for op in operations:
if op.tile_description.threadblock_shape[1] <= 32:
op.C.alignment = 4
#
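# 8-bit integer (s8/u8) GEMM/Conv2d kernels using the 16x8x32 Tensor Core MMA with s32
# accumulation, including fixed- and few-channel Conv2d fprop variants for small channel counts.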
def GenerateSM80_TensorOp_16832_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 32], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[16, 8, 32], \
DataType.u8, DataType.u8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 80
max_cc = 1024
smem_usage = 164
alignment_constraints = [16,]
alignment_constraints_small_channels = [16, 8, 4]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 32, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 64], 6, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 64], 6, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 32, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, DataType.s32]
data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, DataType.f32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination)
operations = []
operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
operations += CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints_small_channels, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
operations += CreateConv2dFewChannelsOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints_small_channels, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
if op.tile_description.threadblock_shape[1] >= 128:
if op.tile_description.threadblock_shape[0] == 32:
op.C.alignment = 8
else:
op.C.alignment = 16
else:
op.C.alignment = 8
#
#
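# Structured-sparse int8 GEMMs (16x8x64 sparse Tensor Core MMA), TN layouts only,
# with s32 accumulation and an optional clamped s8-output variant.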
def GenerateSM80_SparseTensorOp_16864_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 1):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
]
math_inst = \
MathInstruction( \
[16, 8, 64], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate)
min_cc = 80
max_cc = 1024
alignment_constraints = [16,]
tile_descriptions = [
TileDescription([128, 64, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 256], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.s8, DataType.s8, DataType.s32, DataType.s32]
data_type_mixed = [DataType.s8, DataType.s8, DataType.s8, DataType.f32]
CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
operations = []
operations += CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
for op in operations:
if op.tile_description.threadblock_shape[1] >= 128:
op.C.alignment = 16
else:
op.C.alignment = 8
#
#
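# Int8 GEMM/Conv2d kernels on 32-element interleaved layouts (ColumnMajorInterleaved32 /
# RowMajorInterleaved32, NC32HW32 for convolution) with clamped int8 epilogues.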
def GenerateSM80_TensorOp_16832_Interleaved(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajorInterleaved32, LayoutType.RowMajorInterleaved32, LayoutType.ColumnMajorInterleaved32),
]
math_instructions = [
MathInstruction( \
[16, 8, 32], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[16, 8, 32], \
DataType.u8, DataType.u8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [16,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 10, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, DataType.f32]
operations = CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
conv_layout = (LayoutType.TensorNC32HW32, LayoutType.TensorC32RSK32, LayoutType.TensorNC32HW32)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
op.C.alignment = 8
#
#
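# 4-bit integer (s4/u4) GEMM/Conv2d kernels using the 16x8x64 Tensor Core MMA with s32
# accumulation; the mixed-output variants clamp results back to the 4-bit input type.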
def GenerateSM80_TensorOp_16864_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 64], \
DataType.s4, DataType.s4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[16, 8, 64], \
DataType.u4, DataType.u4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [32,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 256], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 256], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 256], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 256], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 256], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 256], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, DataType.s32]
data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, DataType.f32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
operations = []
operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
if op.tile_description.threadblock_shape[1] >= 128:
op.C.alignment = 16
      else:
        op.C.alignment = 8
#
#
def GenerateSM80_SparseTensorOp_168128_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 1):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
]
math_inst = \
MathInstruction( \
[16, 8, 128], \
DataType.s4, DataType.s4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate)
min_cc = 80
max_cc = 1024
alignment_constraints = [32,]
tile_descriptions = [
TileDescription([ 64, 64, 256], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 256], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 256], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 256], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 256], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 256], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 512], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 512], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 512], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 512], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.s4, DataType.s4, DataType.s32, DataType.s32]
data_type_mixed = [DataType.s4, DataType.s4, DataType.s4, DataType.f32]
CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
operations = []
operations += CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
for op in operations:
if op.tile_description.threadblock_shape[1] > 128:
op.C.alignment = 16
else:
op.C.alignment = 8
#
#
def GenerateSM80_TensorOp_16864_Interleaved(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajorInterleaved64, LayoutType.RowMajorInterleaved64, LayoutType.ColumnMajorInterleaved64),
]
math_instructions = [
MathInstruction( \
[16, 8, 64], \
DataType.s4, DataType.s4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[16, 8, 64], \
DataType.u4, DataType.u4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [32,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, DataType.f32]
operations = []
operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
conv_layout = (LayoutType.TensorNC64HW64, LayoutType.TensorC64RSK64, LayoutType.TensorNC64HW64)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
op.C.alignment = 16
#
#
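# 1-bit (b1) GEMMs using the 16x8x256 binary Tensor Core MMA with XOR-popc and AND-popc
# math; max_cc caps the compute capability per math operation (89 for XOR, 90 for AND).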
def GenerateSM80_TensorOp_168256(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 256], \
DataType.b1, DataType.b1, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.xor_popc),
MathInstruction( \
[16, 8, 256], \
DataType.b1, DataType.b1, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.and_popc),
]
min_cc = 80
max_cc = {
MathOperation.xor_popc: 89,
MathOperation.and_popc: 90
}
alignment_constraints = [128,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 512], 3, [4, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 256, 512], 3, [2, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([256, 64, 512], 4, [4, 1, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 256, 512], 4, [1, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 128, 512], 5, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 64, 512], 6, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 128, 512], 6, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 64, 512], 10, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([256, 128, 1024], 3, [4, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 256, 1024], 3, [2, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([256, 64, 1024], 4, [4, 1, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 256, 1024], 4, [1, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 128, 1024], 4, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 64, 1024], 3, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 128, 1024], 3, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 64, 1024], 5, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
]
data_type = [DataType.b1, DataType.b1, DataType.s32, DataType.s32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
#
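# TF32 GEMM/Conv2d kernels using the 16x8x8 Tensor Core MMA with F32 accumulation.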
def GenerateSM80_TensorOp_1688(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
]
min_cc = 80
max_cc = 1024
alignment_constraints = [4, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
      TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints)
#
#
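# "Fast math" F32 kernels: F32 input/output data routed through TF32, fast-F16, or
# fast-BF16 Tensor Core math instructions with F32 accumulation.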
def GenerateSM80_TensorOp_1688_fast_math(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_f16),
MathInstruction( \
[16, 8, 8], \
DataType.bf16, DataType.bf16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_bf16),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [4, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
#
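# F32 kernels using multiply_add_fast_f32, which emulates full-precision FP32 math on
# TF32 Tensor Cores for higher accuracy than a single TF32 pass.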
def GenerateSM80_TensorOp_1688_fast_fp32_math(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_f32),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [4, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
def GenerateSM80_TensorOp_1688_fast_fp32_math_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_fast_f32)
min_cc = 80
max_cc = 1024
tile_descriptions = [
TileDescription([128, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32, DataType.cf32, DataType.cf32, DataType.cf32
]
alignment_constraints = [1,]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
def GenerateSM80_SparseTensorOp_16816_fast_math(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 1):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.RowMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.RowMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 16], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [4]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32]
CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
#
def GenerateSM80_TensorOp_1688_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 80
max_cc = 1024
tile_descriptions = [
TileDescription([128, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32, DataType.cf32, DataType.cf32, DataType.cf32
]
alignment_constraints = [1,]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
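# Rank-k update (SYRK) kernels for F32/TF32 Tensor Core math; fill_modes selects the
# lower or upper triangle of the output.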
def GenerateSM80_TensorOp_1688_rank_k(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_f32),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [1, 2, 4] # Alignment only applies to A in SYRK
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
#TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
#TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32]
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM80_TensorOp_1688_rank_k_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_fast_f32),
]
min_cc = 80
max_cc = 1024
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 64, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 16], 4, [2, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32, DataType.cf32, DataType.cf32
]
alignment_constraints = [1,]
# SYRK
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HERK
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
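# Triangular matrix-matrix multiply (TRMM) kernels; side, fill, and diagonal modes are
# enumerated so left/right, lower/upper, and unit/non-unit variants are all emitted.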
def GenerateSM80_TensorOp_1688_trmm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_f32),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [1, 2, 4]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
#TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints)
#
#
def GenerateSM80_TensorOp_1688_trmm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_fast_f32),
]
min_cc = 80
max_cc = 1024
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 64, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32, DataType.cf32, DataType.cf32, DataType.cf32
]
alignment_constraints = [1,]
complex_transforms = [
ComplexTransform.none, ComplexTransform.conj,
]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
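# Symmetric matrix-matrix multiply (SYMM) kernels for F32/TF32 Tensor Core math.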
def GenerateSM80_TensorOp_1688_symm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
  # A and B have the same layout
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_f32),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [
1, 2, 4
]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
#TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
#TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32]
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM80_TensorOp_1688_symm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_fast_f32),
]
min_cc = 80
max_cc = 1024
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 64, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 16], 4, [2, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32, DataType.cf32, DataType.cf32, DataType.cf32
]
alignment_constraints = [1,]
# SYMM
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HEMM
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
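# Double-precision (F64) GEMM kernels using the 8x8x4 Tensor Core MMA available on SM80.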
def GenerateSM80_TensorOp_884(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 32, 16], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([32, 256, 16], 3, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
#
def GenerateSM80_TensorOp_884_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8 ], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8 ], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8 ], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 8 ], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 8 ], 4, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 3, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
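# Complex F64 GEMMs using the Gaussian (3-multiply) complex multiply-add, which trades
# extra additions for one fewer real multiplication per complex product.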
def GenerateSM80_TensorOp_884_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM80_TensorOp_884_rank_k(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64]
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM80_TensorOp_884_rank_k_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64]
# SYRK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HERK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM80_TensorOp_884_rank_k_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [ComplexTransform.none,]
# SYRK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HERK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM80_TensorOp_884_trmm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints)
#
#
def GenerateSM80_TensorOp_884_trmm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
ComplexTransform.none, ComplexTransform.conj,
]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM80_TensorOp_884_trmm_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
ComplexTransform.none, ComplexTransform.conj,
]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM80_TensorOp_884_symm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM80_TensorOp_884_symm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
# SYMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HEMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM80_TensorOp_884_symm_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [ComplexTransform.none,]
# SYMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HEMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
###################################################################################################
#
def GenerateSM80_Simt_f32(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 8], 5, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 8], 5, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 8], 5, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 8], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 8], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 8], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 8], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 8], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 8], 5, [1, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
#
def GenerateSM80_Simt_f64(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 8], 5, [1, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
##################################################################################################
#
def GenerateSM80_Simt_complex(manifest, cuda_version):
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.Simt, \
MathOperation.multiply_add_complex),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
data_type = [
DataType.cf32,
DataType.cf32,
DataType.cf32,
DataType.cf32
]
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 8], 5, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 8], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints, complex_transforms)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
###################################################################################################
#
def GenerateSM80(manifest, cuda_version):
GenerateSM80_TensorOp_16816(manifest, cuda_version)
GenerateSM80_SparseTensorOp_16832(manifest, cuda_version)
GenerateSM80_PlanarComplexTensorOp_16816(manifest, cuda_version)
GenerateSM80_TensorOp_1688(manifest, cuda_version)
GenerateSM80_TensorOp_1688_fast_math(manifest, cuda_version)
GenerateSM80_SparseTensorOp_16816_fast_math(manifest, cuda_version)
GenerateSM80_TensorOp_1688_complex(manifest, cuda_version)
# 3xTF32
GenerateSM80_TensorOp_1688_fast_fp32_math(manifest, cuda_version)
GenerateSM80_TensorOp_1688_fast_fp32_math_complex(manifest, cuda_version)
GenerateSM80_TensorOp_1688_rank_k(manifest, cuda_version)
GenerateSM80_TensorOp_1688_rank_k_complex(manifest, cuda_version)
GenerateSM80_TensorOp_1688_trmm(manifest, cuda_version)
GenerateSM80_TensorOp_1688_trmm_complex(manifest, cuda_version)
GenerateSM80_TensorOp_1688_symm(manifest, cuda_version)
GenerateSM80_TensorOp_1688_symm_complex(manifest, cuda_version)
GenerateSM80_TensorOp_884(manifest, cuda_version)
GenerateSM80_TensorOp_884_complex(manifest, cuda_version)
GenerateSM80_TensorOp_884_complex_gaussian(manifest, cuda_version)
GenerateSM80_TensorOp_884_rank_k(manifest, cuda_version)
GenerateSM80_TensorOp_884_rank_k_complex(manifest, cuda_version)
GenerateSM80_TensorOp_884_rank_k_complex_gaussian(manifest, cuda_version)
GenerateSM80_TensorOp_884_trmm(manifest, cuda_version)
GenerateSM80_TensorOp_884_trmm_complex(manifest, cuda_version)
GenerateSM80_TensorOp_884_trmm_complex_gaussian(manifest, cuda_version)
GenerateSM80_TensorOp_884_symm(manifest, cuda_version)
GenerateSM80_TensorOp_884_symm_complex(manifest, cuda_version)
GenerateSM80_TensorOp_884_symm_complex_gaussian(manifest, cuda_version)
GenerateSM80_TensorOp_16816_mixed_input_upcast_a(manifest, cuda_version)
GenerateSM80_TensorOp_16816_mixed_input_upcast_b(manifest, cuda_version)
GenerateSM80_TensorOp_16832_TN(manifest, cuda_version)
GenerateSM80_SparseTensorOp_16864_TN(manifest, cuda_version)
GenerateSM80_TensorOp_16832_Interleaved(manifest, cuda_version)
GenerateSM80_TensorOp_16864_TN(manifest, cuda_version)
GenerateSM80_SparseTensorOp_168128_TN(manifest, cuda_version)
GenerateSM80_TensorOp_16864_Interleaved(manifest, cuda_version)
GenerateSM80_TensorOp_168256(manifest, cuda_version)
GenerateSM80_Simt_f32(manifest, cuda_version)
GenerateSM80_Simt_f64(manifest, cuda_version)
GenerateSM80_Simt_complex(manifest, cuda_version)
###################################################################################################
def GenerateSM89_TensorOp_16832_fp8(manifest, cuda_version):
  if not CudaToolkitVersionSatisfies(cuda_version, 12, 4):
    return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor)
]
math_instructions = [
MathInstruction(
[16, 8, 32],
DataType.e4m3, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[16, 8, 32],
DataType.e4m3, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[16, 8, 32],
DataType.e5m2, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[16, 8, 32],
DataType.e5m2, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[16, 8, 32],
DataType.e4m3, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add_fast_accum),
MathInstruction(
[16, 8, 32],
DataType.e4m3, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add_fast_accum),
MathInstruction(
[16, 8, 32],
DataType.e5m2, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add_fast_accum),
MathInstruction(
[16, 8, 32],
DataType.e5m2, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add_fast_accum),
]
min_cc = 89
max_cc = 89
alignment_constraints = [16,]
alignment_constraints_small_channels = [16, 8, 4]
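  # 16 8-bit elements give 128-bit (16-byte) vectorized global accesses for e4m3/e5m2;
  # the fixed-channel conv variants below additionally allow 8- and 4-element alignment
  # for small channel counts.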
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 64], 6, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 6, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 3, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 32, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 64], 6, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 64], 6, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 32, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 128], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_types = [
[
math_inst.element_a,
math_inst.element_b,
DataType.f32,
math_inst.element_accumulator
],
]
operations = []
for data_type in data_types:
operations += CreateGemmOperator(manifest, layouts, tile_descriptions, data_type,
alignment_constraints, None, EpilogueFunctor.LinearCombination)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination)
operations += CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions,
data_type, alignment_constraints_small_channels, [ConvKind.Fprop], EpilogueFunctor.LinearCombination)
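    # Post-process the emitted operations: override the epilogue (C/D) alignment per
    # threadblock shape -- 16 elements when the tile N extent is >= 128 (8 if the tile
    # M extent is only 32), otherwise 8 elements.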
for op in operations:
if op.tile_description.threadblock_shape[1] >= 128:
if op.tile_description.threadblock_shape[0] == 32:
op.C.alignment = 8
else:
op.C.alignment = 16
else:
op.C.alignment = 8
#
def GenerateSM89_SparseTensorOp_16864_fp8(manifest, cuda_version):
  if not CudaToolkitVersionSatisfies(cuda_version, 12, 4):
    return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor)
]
math_instructions = [
MathInstruction(
[16, 8, 64],
DataType.e4m3, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[16, 8, 64],
DataType.e4m3, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[16, 8, 64],
DataType.e5m2, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[16, 8, 64],
DataType.e5m2, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[16, 8, 64],
DataType.e4m3, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add_fast_accum),
MathInstruction(
[16, 8, 64],
DataType.e4m3, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add_fast_accum),
MathInstruction(
[16, 8, 64],
DataType.e5m2, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add_fast_accum),
MathInstruction(
[16, 8, 64],
DataType.e5m2, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add_fast_accum),
]
min_cc = 89
max_cc = 89
alignment_constraints = [16,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 64, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 256], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_types = [
[
math_inst.element_a,
math_inst.element_b,
DataType.f32,
math_inst.element_accumulator
],
]
operations = []
for data_type in data_types:
operations += CreateSparseGemmOperator(manifest, layouts, tile_descriptions, data_type,
alignment_constraints, None, EpilogueFunctor.LinearCombination)
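    # As in the dense FP8 path above, widen the C/D alignment to 16 elements for tiles
    # whose N extent is >= 128.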
for op in operations:
if op.tile_description.threadblock_shape[1] >= 128:
op.C.alignment = 16
else:
op.C.alignment = 8
###################################################################################################
#
def GenerateSM89(manifest, cuda_version):
GenerateSM89_TensorOp_16832_fp8(manifest, cuda_version)
GenerateSM89_SparseTensorOp_16864_fp8(manifest, cuda_version)
###################################################################################################
#
def GenerateSM90_TensorOp_16b_WGMMA_gemm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 12, 0):
return
# layouts for ABC and their alignments.
layouts = [
[[LayoutType.ColumnMajor, 8], [LayoutType.ColumnMajor, 8], [LayoutType.ColumnMajor, 1]],
[[LayoutType.ColumnMajor, 8], [LayoutType.RowMajor, 8], [LayoutType.ColumnMajor, 1]],
[[LayoutType.RowMajor, 8], [LayoutType.ColumnMajor, 8], [LayoutType.ColumnMajor, 1]],
[[LayoutType.RowMajor, 8], [LayoutType.RowMajor, 8], [LayoutType.ColumnMajor, 1]],
]
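  # A/B alignment of 8 16-bit elements corresponds to 128-bit accesses; the C alignment
  # placeholder (1) is overwritten below once the destination type is known.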
math_instructions = [
MathInstruction(
[64, 128, 16],
DataType.f16, DataType.f16, DataType.f16,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 16],
DataType.f16, DataType.f16, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 16],
DataType.bf16, DataType.bf16, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 256, 16],
DataType.f16, DataType.f16, DataType.f16,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 256, 16],
DataType.f16, DataType.f16, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 256, 16],
DataType.bf16, DataType.bf16, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
]
min_cc = 90
max_cc = 90
for math_inst in math_instructions:
tile_descriptions = []
tile_descriptions_small = []
tile_descriptions_medium = []
tile_descriptions_large = []
if math_inst.instruction_shape[1] == 128:
tile_descriptions_small = [
# Not compatible with TmaWarpSpecializedCooperative
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,1,1]),
]
tile_descriptions_medium = [
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
          0, [4, 1, 1], math_inst, min_cc, max_cc, [1,1,1]),
]
tile_descriptions_large = [
TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,1,1]),
]
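      # Worked example for the 64x128x16 instruction: "small" is a 64x128x64 CTA tile,
      # "medium" 128x128x64 and "large" 256x128x64.  The trailing [m,n,k] list is the
      # thread block cluster shape, and a stage count of 0 lets the collective builder
      # pick the pipeline depth from the available shared memory.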
tile_descriptions = tile_descriptions_medium + tile_descriptions_large
else:
tile_descriptions = [
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 2, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 2, 1], math_inst, min_cc, max_cc, [1,2,1]),
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 2, 1], math_inst, min_cc, max_cc, [1,1,1]),
]
data_type = {
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : math_inst.element_accumulator,
"d_type" : math_inst.element_accumulator,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
}
# Set alignment c based on Destination format.
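    # (4 f32 elements and 8 f16/bf16 elements both correspond to a 128-bit / 16-byte store.)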
for layout in layouts:
if data_type["c_type"] in [DataType.s32, DataType.f32]:
layout[2][1] = 4
elif data_type["c_type"] in [DataType.f16, DataType.bf16]:
layout[2][1] = 8
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
schedules = [
[KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized]
]
stream_k_schedules = [[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized]]
else:
schedules = [
[KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto],
[KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized]
# TmaWarpSpecializedCooperative and TmaWarpSpecializedPingpong require CUDA version >= 12.1 for optimal performance.
]
stream_k_schedules = []
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, schedules)
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
# Add stream-K variants
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, stream_k_schedules, tile_schedulers=[TileSchedulerType.StreamK])
# persistent kernels with TMA epilogues
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
# not enough smem for 256x128 f32 out with C allocation
if data_type["d_type"] == DataType.f32 and len(tile_descriptions_medium) > 0:
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions_medium, data_type,
[[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions_medium, data_type,
[[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]],
tile_schedulers=[TileSchedulerType.StreamK])
else:
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
[[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
[[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]],
tile_schedulers=[TileSchedulerType.StreamK])
# Emit instance without C allocation + load
data_type["c_type"] = DataType.void
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
[[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
[[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]],
tile_schedulers=[TileSchedulerType.StreamK])
# for mixed precision kernels, also generate kernels that write output matrix in the A/B format
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = {
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : math_inst.element_a,
"d_type" : math_inst.element_a,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
}
# Set alignment c based on Destination format.
for layout in layouts:
if data_type_mixed["c_type"] in [DataType.s32, DataType.f32]:
layout[2][1] = 4
elif data_type_mixed["c_type"] in [DataType.f16, DataType.bf16]:
layout[2][1] = 8
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed, schedules)
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed, stream_k_schedules, tile_schedulers=[TileSchedulerType.StreamK])
# persistent kernels with TMA epilogues
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed,
[[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed,
[[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]],
tile_schedulers=[TileSchedulerType.StreamK])
# Emit instance without C allocation+load
data_type_mixed["c_type"] = DataType.void
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed,
[[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed,
[[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]],
tile_schedulers=[TileSchedulerType.StreamK])
#
def GenerateSM90_TensorOp_16b_WGMMA_alignx_gemm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 12, 0):
return
# layouts for ABC and their alignments.
layouts = [
[[LayoutType.RowMajor, 4], [LayoutType.ColumnMajor, 4], [LayoutType.ColumnMajor, 1]],
[[LayoutType.RowMajor, 4], [LayoutType.RowMajor, 4], [LayoutType.ColumnMajor, 1]],
[[LayoutType.ColumnMajor, 4], [LayoutType.ColumnMajor, 4], [LayoutType.ColumnMajor, 1]],
[[LayoutType.ColumnMajor, 4], [LayoutType.RowMajor, 4], [LayoutType.ColumnMajor, 1]],
[[LayoutType.RowMajor, 2], [LayoutType.ColumnMajor, 2], [LayoutType.ColumnMajor, 1]],
[[LayoutType.RowMajor, 2], [LayoutType.RowMajor, 2], [LayoutType.ColumnMajor, 1]],
[[LayoutType.ColumnMajor, 2], [LayoutType.ColumnMajor, 2], [LayoutType.ColumnMajor, 1]],
[[LayoutType.ColumnMajor, 2], [LayoutType.RowMajor, 2], [LayoutType.ColumnMajor, 1]],
]
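  # These reduced alignments (4 or 2 16-bit elements) fall below the 16-byte minimum
  # required by TMA, which is why the schedules below use cp.async-based
  # (CpAsyncWarpSpecialized*) mainloops instead.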
math_instructions = [
MathInstruction(
[64, 128, 16],
DataType.f16, DataType.f16, DataType.f16,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 16],
DataType.f16, DataType.f16, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 16],
DataType.bf16, DataType.bf16, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
]
min_cc = 90
max_cc = 90
for math_inst in math_instructions:
tile_descriptions_small = [
# TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
# 0, [4, 1, 1], math_inst, min_cc, max_cc, [1,1,1]),
]
tile_descriptions_medium = [
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,1,1]),
# TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1]*2, math_inst.instruction_shape[2]*4],
# 0, [4, 1, 1], math_inst, min_cc, max_cc, [1,1,1]),
]
tile_descriptions = tile_descriptions_small + tile_descriptions_medium
data_type = {
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : math_inst.element_accumulator,
"d_type" : math_inst.element_accumulator,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
}
# Set alignment c based on Destination format.
for layout in layouts:
if data_type["c_type"] in [DataType.s32, DataType.f32]:
layout[2][1] = 4
elif data_type["c_type"] in [DataType.f16, DataType.bf16]:
layout[2][1] = 8
schedules = [
# [KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto],
[KernelScheduleType.CpAsyncWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized]
]
stream_k_schedules = []
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
schedules += [
[KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized],
# [KernelScheduleType.CpAsyncWarpSpecializedPingpong, EpilogueScheduleType.NoSmemWarpSpecialized]
]
stream_k_schedules += [[KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized]]
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, schedules)
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
# Add stream-K variants
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, stream_k_schedules, tile_schedulers=[TileSchedulerType.StreamK])
# persistent kernels with TMA epilogues
# if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
# CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
# [[KernelScheduleType.CpAsyncWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
# [KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
# CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
# [[KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]],
# tile_schedulers=[TileSchedulerType.StreamK])
# # Emit instance without C allocation + load
# data_type["c_type"] = DataType.void
# CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
# [[KernelScheduleType.CpAsyncWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
# [KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
# CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
# [[KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]],
# tile_schedulers=[TileSchedulerType.StreamK])
# for mixed precision kernels, also generate kernels that write output matrix in the A/B format
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = {
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : math_inst.element_a,
"d_type" : math_inst.element_a,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
}
# Set alignment c based on Destination format.
for layout in layouts:
if data_type_mixed["c_type"] in [DataType.s32, DataType.f32]:
layout[2][1] = 4
elif data_type_mixed["c_type"] in [DataType.f16, DataType.bf16]:
layout[2][1] = 8
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed, schedules)
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed, stream_k_schedules, tile_schedulers=[TileSchedulerType.StreamK])
# persistent kernels with TMA epilogues
# if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
# CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed,
# [[KernelScheduleType.CpAsyncWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
# [KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
# CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed,
# [[KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]],
# tile_schedulers=[TileSchedulerType.StreamK])
# # Emit instance without C allocation+load
# data_type_mixed["c_type"] = DataType.void
# CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed,
# [[KernelScheduleType.CpAsyncWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
# [KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
# CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed,
# [[KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]],
# tile_schedulers=[TileSchedulerType.StreamK])
#
def GenerateSM90_TensorOp_tf32_WGMMA_gemm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 12, 0):
return
# layouts for ABC and their alignments
layouts_tf32 = [
[[LayoutType.RowMajor, 4], [LayoutType.ColumnMajor, 4], [LayoutType.ColumnMajor, 4]],
[[LayoutType.RowMajor, 4], [LayoutType.RowMajor, 4], [LayoutType.ColumnMajor, 4]],
[[LayoutType.ColumnMajor, 4], [LayoutType.ColumnMajor, 4], [LayoutType.ColumnMajor, 4]],
[[LayoutType.ColumnMajor, 4], [LayoutType.RowMajor, 4], [LayoutType.ColumnMajor, 4]],
]
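  # tf32 operands are stored in 32-bit containers, so an alignment of 4 elements
  # corresponds to 128-bit accesses for A, B, and C.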
math_inst = MathInstruction(
[64, 128, 8],
DataType.tf32, DataType.tf32, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add)
math_inst_largeN = MathInstruction(
[64, 256, 8],
DataType.tf32, DataType.tf32, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add)
min_cc = 90
max_cc = 90
tile_descriptions_large = [
TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst_largeN.instruction_shape[0]*2, math_inst_largeN.instruction_shape[1], math_inst_largeN.instruction_shape[2]*4],
0, [4, 1, 1], math_inst_largeN, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst_largeN.instruction_shape[0]*2, math_inst_largeN.instruction_shape[1], math_inst_largeN.instruction_shape[2]*4],
0, [4, 1, 1], math_inst_largeN, min_cc, max_cc, [1,2,1]),
]
tile_descriptions_medium = [
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
]
tile_descriptions_small = [
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
]
tile_descriptions = tile_descriptions_medium + tile_descriptions_small
data_types = [
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : math_inst.element_accumulator,
"d_type" : math_inst.element_accumulator,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : DataType.f32,
"b_type" : DataType.f32,
"c_type" : math_inst.element_accumulator,
"d_type" : math_inst.element_accumulator,
"acc_type" : math_inst.element_accumulator,
"epi_type" : DataType.f32
}
]
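  # The second entry emits variants whose A/B operands are f32 in global memory;
  # the tensor cores still compute in tf32, with the mainloop converting the
  # operands before the MMA.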
schedules_default = [
[KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto],
[KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized],
]
  # TMA kernels with TT layout use EpilogueTransposed (NoSmemWarpSpecialized with swapped strides),
  # because they are built on NN kernels underneath, and transposing the epilogue yields the correct output.
schedules_transposed_epilogue = [
[KernelScheduleType.ScheduleAuto, EpilogueScheduleType.EpilogueTransposed],
[KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.EpilogueTransposed]
]
# TMA kernels with TN, NN, or NT layout
layouts_tf32_tn_nn_nt = [layouts_tf32[0], layouts_tf32[2], layouts_tf32[3]]
# TMA kernels with TT layout
layouts_tf32_tt = [layouts_tf32[1]]
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
CreateGemmUniversal3xOperator(manifest, layouts_tf32_tn_nn_nt, tile_descriptions_small, data_types, [
[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.NoSmemWarpSpecialized]
])
CreateGemmUniversal3xOperator(manifest, layouts_tf32_tn_nn_nt, tile_descriptions_medium, data_types, [
[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized]
])
CreateGemmUniversal3xOperator(manifest, layouts_tf32_tn_nn_nt, tile_descriptions_large, data_types, [
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized],
])
CreateGemmUniversal3xOperator(manifest, layouts_tf32_tt, tile_descriptions_small, data_types, [
[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.EpilogueTransposed]
])
CreateGemmUniversal3xOperator(manifest, layouts_tf32_tt, tile_descriptions_medium, data_types, [
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.EpilogueTransposed],
[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.EpilogueTransposed]
])
CreateGemmUniversal3xOperator(manifest, layouts_tf32_tt, tile_descriptions_large, data_types, [
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.EpilogueTransposed],
])
else:
CreateGemmUniversal3xOperator(manifest, layouts_tf32_tn_nn_nt, tile_descriptions, data_types, schedules_default)
CreateGemmUniversal3xOperator(manifest, layouts_tf32_tt, tile_descriptions, data_types, schedules_transposed_epilogue)
#
def GenerateSM90_TensorOp_tf32_WGMMA_alignx_gemm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 12, 0):
return
# layouts for ABC and their alignments.
layouts = [
[[LayoutType.RowMajor, 2], [LayoutType.ColumnMajor, 2], [LayoutType.ColumnMajor, 1]],
[[LayoutType.RowMajor, 2], [LayoutType.RowMajor, 2], [LayoutType.ColumnMajor, 1]],
[[LayoutType.ColumnMajor, 2], [LayoutType.ColumnMajor, 2], [LayoutType.ColumnMajor, 1]],
[[LayoutType.ColumnMajor, 2], [LayoutType.RowMajor, 2], [LayoutType.ColumnMajor, 1]],
[[LayoutType.RowMajor, 1], [LayoutType.ColumnMajor, 1], [LayoutType.ColumnMajor, 1]],
[[LayoutType.RowMajor, 1], [LayoutType.RowMajor, 1], [LayoutType.ColumnMajor, 1]],
[[LayoutType.ColumnMajor, 1], [LayoutType.ColumnMajor, 1], [LayoutType.ColumnMajor, 1]],
[[LayoutType.ColumnMajor, 1], [LayoutType.RowMajor, 1], [LayoutType.ColumnMajor, 1]],
]
math_inst = MathInstruction(
[64, 128, 8],
DataType.tf32, DataType.tf32, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add)
min_cc = 90
max_cc = 90
tile_descriptions_medium = [
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,1,1])
]
tile_descriptions_small = [
# TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
# 0, [4, 1, 1], math_inst, min_cc, max_cc, [1,1,1])
]
tile_descriptions = tile_descriptions_medium + tile_descriptions_small
data_types = [
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : math_inst.element_accumulator,
"d_type" : math_inst.element_accumulator,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : DataType.f32,
"b_type" : DataType.f32,
"c_type" : math_inst.element_accumulator,
"d_type" : math_inst.element_accumulator,
"acc_type" : math_inst.element_accumulator,
"epi_type" : DataType.f32
}
]
is_tt_layout = lambda v: v[0][0] == LayoutType.RowMajor and v[1][0] == LayoutType.RowMajor
  # Split kernels into TN/NT, NN or TT layouts. Materialize the filters as lists so
  # they can be iterated by more than one CreateGemmUniversal3xOperator call below.
  layouts_tn_nn_nt = list(filter(lambda v: not is_tt_layout(v), layouts))
  layouts_tt = list(filter(is_tt_layout, layouts))
CreateGemmUniversal3xOperator(manifest, layouts_tn_nn_nt, tile_descriptions, data_types, [
# [KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto],
[KernelScheduleType.CpAsyncWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized],
])
  # Kernels with TT layout use EpilogueTransposed (NoSmemWarpSpecialized with swapped strides),
  # because they are built on NN kernels underneath, and transposing the epilogue yields the correct output.
CreateGemmUniversal3xOperator(manifest, layouts_tt, tile_descriptions, data_types, [
# [KernelScheduleType.ScheduleAuto, EpilogueScheduleType.EpilogueTransposed],
[KernelScheduleType.CpAsyncWarpSpecialized, EpilogueScheduleType.EpilogueTransposed]
])
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
CreateGemmUniversal3xOperator(manifest, layouts_tn_nn_nt, tile_descriptions, data_types, [
# [KernelScheduleType.CpAsyncWarpSpecializedPingpong, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized]
])
# Stream-K schedules
CreateGemmUniversal3xOperator(manifest, layouts_tn_nn_nt, tile_descriptions, data_types, [
[KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized]
], tile_schedulers=[TileSchedulerType.StreamK])
#
def GenerateSM90_TensorOp_int8_WGMMA_gemm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 12, 0):
return
# layouts for ABC and their alignments
layouts = [
[[LayoutType.RowMajor, 16], [LayoutType.ColumnMajor, 16], [LayoutType.ColumnMajor, 16]],
]
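  # 16 8-bit elements give 128-bit accesses for A and B; the C/D alignment is
  # recomputed below from the destination element size (128 // DataTypeSize[d_type]).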
math_instructions = [
MathInstruction(
[64, 128, 32],
DataType.s8, DataType.s8, DataType.s32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 32],
DataType.u8, DataType.u8, DataType.s32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 256, 32],
DataType.s8, DataType.s8, DataType.s32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 256, 32],
DataType.u8, DataType.u8, DataType.s32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
]
min_cc = 90
max_cc = 90
for math_inst in math_instructions:
# 64x128x128 or 64x256x128
tile_descriptions_small = [
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
]
# 128x128x128 or 128x256x128
tile_descriptions_medium = [
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
]
tile_descriptions = tile_descriptions_medium + tile_descriptions_small
data_types = [
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : math_inst.element_accumulator,
"d_type" : math_inst.element_accumulator,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.s8,
"d_type" : math_inst.element_a,
"acc_type" : math_inst.element_accumulator,
"epi_type" : DataType.f32
}
]
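    # The second entry produces 8-bit-output kernels: the s32 accumulators are scaled
    # in f32 in the epilogue and converted back to the 8-bit A/B element type for D.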
for data_type in data_types:
for layout in layouts:
layout[2][1] = 128 // DataTypeSize[data_type["d_type"]]
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type)
# persistent kernels with TMA epilogues
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
# Emit instance without C allocation+load
data_types += [
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.void,
"d_type" : math_inst.element_accumulator,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
}
]
for data_type in data_types:
# Set output alignment based on destination format first
for layout in layouts:
layout[2][1] = 128 // DataTypeSize[data_type["d_type"]]
# Pingpong persistent
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
[[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.NoSmemWarpSpecialized]])
# Cooperative persistent
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions_medium, data_type,
[[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized]],
tile_schedulers=[TileSchedulerType.Persistent, TileSchedulerType.StreamK]
)
#
def GenerateSM90_TensorOp_int8_WGMMA_alignx_gemm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 12, 0):
return
# layouts for ABC and their alignments
layouts = [
[[LayoutType.RowMajor, 8], [LayoutType.ColumnMajor, 8], [LayoutType.ColumnMajor, 1]],
[[LayoutType.RowMajor, 4], [LayoutType.ColumnMajor, 4], [LayoutType.ColumnMajor, 1]],
]
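  # A/B alignments of 8 and 4 8-bit elements (64 and 32 bits) are below the 16-byte
  # TMA minimum, hence the cp.async kernel schedules used below.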
math_instructions = [
MathInstruction(
[64, 128, 32],
DataType.s8, DataType.s8, DataType.s32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 32],
DataType.u8, DataType.u8, DataType.s32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
]
min_cc = 90
max_cc = 90
for math_inst in math_instructions:
tile_descriptions_small = [
# TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
# 0, [4, 1, 1], math_inst, min_cc, max_cc, [1,1,1]),
]
tile_descriptions_medium = [
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,1,1]),
]
tile_descriptions = tile_descriptions_medium + tile_descriptions_small
data_types = [
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : math_inst.element_accumulator,
"d_type" : math_inst.element_accumulator,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.s8,
"d_type" : math_inst.element_a,
"acc_type" : math_inst.element_accumulator,
"epi_type" : DataType.f32
}
]
for data_type in data_types:
for layout in layouts:
layout[2][1] = 128 // DataTypeSize[data_type["d_type"]]
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, [
# [KernelScheduleType.ScheduleAuto, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.CpAsyncWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized]
])
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, [
# [KernelScheduleType.CpAsyncWarpSpecializedPingpong, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized]
])
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
[[KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized]],
tile_schedulers=[TileSchedulerType.StreamK])
#
def GenerateSM90_TensorOp_fp8_WGMMA_gemm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 12, 0):
return
# layouts for ABC and their alignments
layouts = [
[[LayoutType.RowMajor, 16], [LayoutType.ColumnMajor, 16], [LayoutType.ColumnMajor, 1]], # TN Layout
]
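  # Only the K-major (TN) layout is emitted: 16 8-bit elements give 128-bit accesses,
  # and the SM90 FP8 tensor core instructions expect K-major A and B operands.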
math_instructions = [
# inst 64x128x32
MathInstruction(
[64, 128, 32],
DataType.e4m3, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 32],
DataType.e4m3, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 32],
DataType.e5m2, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 32],
DataType.e5m2, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
# inst 64x256x32
MathInstruction(
[64, 256, 32],
DataType.e4m3, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 256, 32],
DataType.e4m3, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 256, 32],
DataType.e5m2, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 256, 32],
DataType.e5m2, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
]
min_cc = 90
max_cc = 90
for math_inst in math_instructions:
data_types = [
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f32,
"d_type" : DataType.f32,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f32,
"d_type" : DataType.e4m3,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f32,
"d_type" : DataType.e5m2,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.bf16,
"d_type" : DataType.bf16,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.bf16,
"d_type" : DataType.e4m3,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.bf16,
"d_type" : DataType.e5m2,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f16,
"d_type" : DataType.f16,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f16,
"d_type" : DataType.e4m3,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f16,
"d_type" : DataType.e5m2,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
]
data_types_large_tile = [
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.void,
"d_type" : DataType.e5m2,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.void,
"d_type" : DataType.e4m3,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
}
]
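    # Per the name, these data types are paired with the large-tile shapes: C is
    # omitted (void) and D is written directly in FP8 (e4m3 or e5m2).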
if math_inst.instruction_shape[1] == 128:
tile_descriptions_small = [
# 64x128x128
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,1,1]),
]
tile_descriptions_large = [
# 256x128x128
TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,1,1]),
]
tile_descriptions = [
# 128x128x128
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,1,1]),
]
elif math_inst.instruction_shape[1] == 256:
tile_descriptions_small = [
# 64x256x128
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,1,1]),
]
tile_descriptions_large = []
tile_descriptions = [
# 128x256x128
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,1,1]),
]
else:
assert False, "math inst is not supported"
# some schedules disabled to save on library size
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
schedules = [
#[KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedFP8FastAccum, EpilogueScheduleType.NoSmemWarpSpecialized]
]
stream_k_schedules = [[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum, EpilogueScheduleType.NoSmemWarpSpecialized]]
else:
schedules = [
# [KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto],
[KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized]
        # TmaWarpSpecializedCooperative requires CUDA version >= 12.1 for optimal performance.
]
stream_k_schedules = []
for data_type in data_types:
# With No-SMEM epilogues
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, schedules)
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
# Persistent kernels with TMA epilogues
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
[[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative],
[KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
# Small tiles
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions_small, data_type,
[[KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum, EpilogueScheduleType.NoSmemWarpSpecialized]])
# Large tiles
if len(tile_descriptions_large) > 0:
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions_large, data_types_large_tile,
[[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative],
[KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
# Add stream-K variants (with and without TMA epilogues)
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, stream_k_schedules, tile_schedulers=[TileSchedulerType.StreamK])
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
[[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative],
[KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum, EpilogueScheduleType.TmaWarpSpecializedCooperative]],
tile_schedulers=[TileSchedulerType.StreamK])
#
def GenerateSM90_TensorOp_fp8_WGMMA_alignx_gemm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 12, 0):
return
# layouts for ABC and their alignments
layouts = [
[[LayoutType.RowMajor, 8], [LayoutType.ColumnMajor, 8], [LayoutType.ColumnMajor, 1]], # TN Layout
[[LayoutType.RowMajor, 4], [LayoutType.ColumnMajor, 4], [LayoutType.ColumnMajor, 1]], # TN Layout
]
math_instructions = [
# inst 64x128x32
MathInstruction(
[64, 128, 32],
DataType.e4m3, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 32],
DataType.e4m3, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 32],
DataType.e5m2, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 32],
DataType.e5m2, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
# inst 64x64x32
# MathInstruction(
# [64, 64, 32],
# DataType.e4m3, DataType.e4m3, DataType.f32,
# OpcodeClass.TensorOp,
# MathOperation.multiply_add),
# MathInstruction(
# [64, 64, 32],
# DataType.e4m3, DataType.e5m2, DataType.f32,
# OpcodeClass.TensorOp,
# MathOperation.multiply_add),
# MathInstruction(
# [64, 64, 32],
# DataType.e5m2, DataType.e4m3, DataType.f32,
# OpcodeClass.TensorOp,
# MathOperation.multiply_add),
# MathInstruction(
# [64, 64, 32],
# DataType.e5m2, DataType.e5m2, DataType.f32,
# OpcodeClass.TensorOp,
# MathOperation.multiply_add),
]
min_cc = 90
max_cc = 90
for math_inst in math_instructions:
data_types = [
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f32,
"d_type" : DataType.f32,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f32,
"d_type" : DataType.e4m3,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f32,
"d_type" : DataType.e5m2,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.bf16,
"d_type" : DataType.bf16,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.bf16,
"d_type" : DataType.e4m3,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.bf16,
"d_type" : DataType.e5m2,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f16,
"d_type" : DataType.f16,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f16,
"d_type" : DataType.e4m3,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f16,
"d_type" : DataType.e5m2,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
]
if math_inst.instruction_shape[1] == 128:
tile_descriptions = [
# 128x128x128
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,1,1]),
]
# elif math_inst.instruction_shape[1] == 64:
# tile_descriptions = [
# # 256x64x128
# TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
# 0, [4, 1, 1], math_inst, min_cc, max_cc, [1,1,1]),
# ]
else:
assert False, "math inst is not supported"
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
schedules = [
# [KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto],
[KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized],
# [KernelScheduleType.CpAsyncWarpSpecializedPingpong, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.CpAsyncWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized],
]
stream_k_schedules = [[KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized]]
else:
schedules = [
# [KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto],
[KernelScheduleType.CpAsyncWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized]
]
stream_k_schedules = []
for data_type in data_types:
# With No-SMEM epilogues
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, schedules)
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
# Persistent kernels with TMA epilogues
# CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
# [[KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
# Add stream-K variants (with and without TMA epilogues)
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, stream_k_schedules, tile_schedulers=[TileSchedulerType.StreamK])
# CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
# [[KernelScheduleType.CpAsyncWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]],
# tile_schedulers=[TileSchedulerType.StreamK])
#
def GenerateSM90_TensorOp_1684(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = MathInstruction(
[16, 8, 4],
DataType.f64, DataType.f64, DataType.f64,
OpcodeClass.TensorOp,
MathOperation.multiply_add)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 32, 16], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([32, 256, 16], 3, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateGemmOperator(manifest, layouts, tile_descriptions,
data_type, alignment_constraints)
#
#
def GenerateSM90_TensorOp_1684_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8 ], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8 ], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8 ], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 8 ], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 8 ], 4, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 3, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM90_TensorOp_1684_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM90_TensorOp_1684_rank_k(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64]
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM90_TensorOp_1684_rank_k_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64]
# SYRK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HERK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM90_TensorOp_1684_rank_k_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [ComplexTransform.none,]
# SYRK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HERK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM90_TensorOp_1684_trmm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints)
#
#
def GenerateSM90_TensorOp_1684_trmm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
ComplexTransform.none, ComplexTransform.conj,
]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM90_TensorOp_1684_trmm_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
ComplexTransform.none, ComplexTransform.conj,
]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM90_TensorOp_1684_symm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM90_TensorOp_1684_symm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
# SYMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HEMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM90_TensorOp_1684_symm_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [ComplexTransform.none,]
# SYMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HEMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
###################################################################################################
def GenerateSM90_Conv3x(manifest, cuda_version,
log_indent_level: int = 0):
"""
Generate CUTLASS 3 convolution kernel(s) for SM90.
This is meant to be called from GenerateSM90.
"""
log_debug_line('GenerateSM90_Conv3x', log_indent_level)
log_indent_level = log_indent_level + 1
if not CudaToolkitVersionSatisfies(cuda_version, 12, 0):
return
minimum_compute_capability = 90
maximum_compute_capability = 90
spatial_dims = (2, 3)
# This function only generates kernels that use TMA.
byte_alignment_required_by_tma = 16
tma_byte_alignments = {
'A': byte_alignment_required_by_tma,
'B': byte_alignment_required_by_tma,
'C': byte_alignment_required_by_tma,
}
  # A one-element tuple needs a trailing comma after its element.
all_byte_alignments = (
tma_byte_alignments,
)
# MMA shapes (MMA_M, MMA_N, MMA_K):
#
# Different hardware MMA instructions may have different MMA shapes.
# This function may generate kernels with different MMA shapes for
# different data types, either because the hardware only supports
# certain shapes for certain types, or for performance reasons
# (CUTLASS doesn't need to generate all valid kernels for the
# profiler library, just the best-performing ones).
#
# The kernel names refer to tile shapes (TILE_M, TILE_N, TILE_K)
# instead of MMA shapes. For SM >= 90 kernels, TILE_K = 4 * MMA_K,
# where 4, the "number of MMA instructions per tile," is determined
# through some combination of modeling and experiment.
#
# For performance on sm90, generally CUTLASS generates 64x128
# instead of 128x64.
mma_64x64x16 = ( 64, 64, 16)
mma_64x64x8 = ( 64, 64, 8)
num_mma_per_tile = 4
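  # For example (illustrative only): mma_64x64x16 together with num_mma_per_tile = 4
  # corresponds to a kernel tile shape of (64, 64, 64), i.e. TILE_K = 4 * MMA_K.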
# Cluster shapes (1, 1, 1) and (2, 2, 1) are valid,
# but not included, because they tend not to perform as well.
cluster_shapes = (
(2, 1, 1),
(1, 2, 1),
)
fp16 = DataType.f16
bf16 = DataType.bf16
fp32 = DataType.f32
s8 = DataType.s8
s32 = DataType.s32
# When generating kernels, the usual way is to specify 4 types,
# (A, B, Acc, C/D). Tests instead have 5 types,
# (ElementAct, ElementFlt, ElementOut, ElementAcc, ElementCompute),
# where ElementCompute is also called 'epi_type',
# and corresponds to the type of epilogue activations.
# This script maps tests' 5 types to 4 types
# by making ElementCompute the same as ElementOut.
fp16_fp32_fp16_fp32 = {
'a_type': fp16, # ElementAct(ivation)
'b_type': fp16, # ElementF(i)lt(er)
'c_type': fp32, # ElementAcc
'd_type': fp32, # ElementOut (used only by CollectiveEpilogue)
'acc_type': fp16, # ElementAcc
'epi_type': fp32, # ElementCompute (used only by CollectiveEpilogue)
}
fp16_fp32_fp32_fp32 = {
'a_type': fp16,
'b_type': fp16,
'c_type': fp32,
'd_type': fp32,
'acc_type': fp32,
'epi_type': fp32,
}
fp32_fp32_fp32_fp32 = {
'a_type': fp32,
'b_type': fp32,
'c_type': fp32,
'd_type': fp32,
'acc_type': fp32,
'epi_type': fp32,
}
s8_s32_s32_s32 = {
'a_type': s8,
'b_type': s8,
'c_type': s32,
'd_type': s32,
'acc_type': s32,
'epi_type': s32,
}
# Other NVIDIA libraries may have the habit of specifying data types like this.
bf16bf16_bf16f32_f32 = {
'a_type': bf16,
'b_type': bf16,
'c_type': fp32,
'd_type': fp32,
'acc_type': fp32,
'epi_type': fp32,
}
f16f16_f16f16_f16 = {
'a_type': fp16,
'b_type': fp16,
'c_type': fp16,
'd_type': fp16,
'acc_type': fp16,
'epi_type': fp16,
}
f16f16_f16f32_f32 = {
'a_type': fp16,
'b_type': fp16,
'c_type': fp16,
'd_type': fp16,
'acc_type': fp32,
'epi_type': fp32,
}
f32f32_tf32f32_f32 = fp32_fp32_fp32_fp32
i8i8_i8i32_f32 = {
'a_type': s8,
'b_type': s8,
'c_type': s32,
'd_type': s32,
'acc_type': s32,
'epi_type': s32,
}
# Each element in the outermost iterable is one combination of
#
# (ConvKind, spatial_dimension, data_types, byte_alignments, mma_sizes, cluster_sizes)
#
# for which to generate a kernel. spatial_dimension is the spatial
# dimension of the convolution: either 1, 2, or 3. byte_alignments
# is a triple of required minimum byte alignments for A, B, and C.
#
# Note that itertools functions produce a single-pass generator.
  # The code doesn't need a multipass iterable, but if it did, one
# could call `tuple` or `list` on the generator.
#
# While this happens to use the same cluster sizes for each element,
# the code doesn't require that. Different convolution kinds, data
# types, or mma sizes might have different optimal cluster sizes.
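  # For illustration, one element produced below is
  #   (ConvKind.Fprop, 2, fp16_fp32_fp16_fp32, tma_byte_alignments, mma_64x64x16, (2, 1, 1)),
  # i.e. a 2-D Fprop kernel with 16-byte TMA alignments for A/B/C, a 64x64x16 MMA
  # shape, and a (2, 1, 1) cluster.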
combinations_of_parameters = chain(
# The following are all the kernels exercised in the unit tests.
# Please try to keep in sync with the unit tests.
product(
(
ConvKind.Fprop,
),
spatial_dims,
(
fp16_fp32_fp16_fp32,
fp16_fp32_fp32_fp32,
s8_s32_s32_s32,
),
all_byte_alignments,
(
mma_64x64x16,
),
cluster_shapes
),
product(
(
ConvKind.Fprop,
),
spatial_dims,
(
fp32_fp32_fp32_fp32,
),
all_byte_alignments,
(
mma_64x64x8,
),
cluster_shapes
),
product(
(
ConvKind.Dgrad,
),
spatial_dims,
(
fp16_fp32_fp16_fp32,
fp16_fp32_fp32_fp32,
),
all_byte_alignments,
(
mma_64x64x16,
),
cluster_shapes
),
# Kernels not necessarily in the unit tests, but used elsewhere
# and thus useful to have generated for profiling. They may
# duplicate kernels above. All of them are 2-D. In general,
# CUTLASS prefers 64 x 128 to 128 x 64 on sm90, even if the
# hardware permits 128 x 64.
(
# Fprop
#
# bf16bf16_bf16f32_f32
#
# cluster shape (2, 1, 1)
#
(ConvKind.Fprop, 2, bf16bf16_bf16f32_f32, tma_byte_alignments, (128, 256, 8), (2, 1, 1)),
(ConvKind.Fprop, 2, bf16bf16_bf16f32_f32, tma_byte_alignments, (128, 256, 16), (2, 1, 1)),
(ConvKind.Fprop, 2, bf16bf16_bf16f32_f32, tma_byte_alignments, (256, 128, 8), (2, 1, 1)),
(ConvKind.Fprop, 2, bf16bf16_bf16f32_f32, tma_byte_alignments, (256, 128, 16), (2, 1, 1)),
#
# f16f16_f16f16_f16
#
# cluster shape (1, 1, 1)
#
(ConvKind.Fprop, 2, f16f16_f16f16_f16, tma_byte_alignments, ( 64, 64, 8), (1, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f16_f16, tma_byte_alignments, ( 64, 64, 16), (1, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f16_f16, tma_byte_alignments, ( 64, 128, 8), (1, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f16_f16, tma_byte_alignments, ( 64, 128, 16), (1, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f16_f16, tma_byte_alignments, ( 64, 256, 8), (1, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f16_f16, tma_byte_alignments, ( 64, 256, 16), (1, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f16_f16, tma_byte_alignments, (128, 128, 8), (1, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f16_f16, tma_byte_alignments, (128, 128, 16), (1, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f16_f16, tma_byte_alignments, (128, 256, 8), (1, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f16_f16, tma_byte_alignments, (128, 256, 16), (1, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f16_f16, tma_byte_alignments, (256, 64, 8), (1, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f16_f16, tma_byte_alignments, (256, 64, 16), (1, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f16_f16, tma_byte_alignments, (256, 128, 8), (1, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f16_f16, tma_byte_alignments, (256, 128, 16), (1, 1, 1)),
#
# f16f16_f16f32_f32
#
# cluster shape (2, 1, 1)
#
(ConvKind.Fprop, 2, f16f16_f16f32_f32, tma_byte_alignments, (128, 192, 8), (2, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f32_f32, tma_byte_alignments, (128, 192, 16), (2, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f32_f32, tma_byte_alignments, (128, 256, 8), (2, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f32_f32, tma_byte_alignments, (128, 256, 16), (2, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f32_f32, tma_byte_alignments, (256, 96, 8), (2, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f32_f32, tma_byte_alignments, (256, 96, 16), (2, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f32_f32, tma_byte_alignments, (256, 128, 8), (2, 1, 1)),
(ConvKind.Fprop, 2, f16f16_f16f32_f32, tma_byte_alignments, (256, 128, 16), (2, 1, 1)),
#
# f32f32_tf32f32_f32
#
# cluster shape (2, 1, 1)
#
(ConvKind.Fprop, 2, f32f32_tf32f32_f32, tma_byte_alignments, (128, 192, 8), (2, 1, 1)),
(ConvKind.Fprop, 2, f32f32_tf32f32_f32, tma_byte_alignments, (128, 256, 8), (2, 1, 1)),
(ConvKind.Fprop, 2, f32f32_tf32f32_f32, tma_byte_alignments, (256, 128, 8), (2, 1, 1)),
(ConvKind.Fprop, 2, f32f32_tf32f32_f32, tma_byte_alignments, (256, 96, 8), (2, 1, 1)),
#
# i8i8_i8i32_f32
#
# cluster shape (2, 1, 1)
#
(ConvKind.Fprop, 2, i8i8_i8i32_f32, tma_byte_alignments, (128, 256, 16), (2, 1, 1)),
(ConvKind.Fprop, 2, i8i8_i8i32_f32, tma_byte_alignments, (128, 256, 32), (2, 1, 1)),
(ConvKind.Fprop, 2, i8i8_i8i32_f32, tma_byte_alignments, (256, 128, 16), (2, 1, 1)),
(ConvKind.Fprop, 2, i8i8_i8i32_f32, tma_byte_alignments, (256, 128, 32), (2, 1, 1)),
#
# Dgrad
#
# bf16bf16_bf16f32_f32
#
# cluster shape (2, 1, 1)
#
(ConvKind.Dgrad, 2, bf16bf16_bf16f32_f32, tma_byte_alignments, (128, 256, 8), (2, 1, 1)),
(ConvKind.Dgrad, 2, bf16bf16_bf16f32_f32, tma_byte_alignments, (128, 256, 16), (2, 1, 1)),
(ConvKind.Dgrad, 2, bf16bf16_bf16f32_f32, tma_byte_alignments, (256, 128, 8), (2, 1, 1)),
(ConvKind.Dgrad, 2, bf16bf16_bf16f32_f32, tma_byte_alignments, (256, 128, 16), (2, 1, 1)),
#
# f16f16_f16f16_f16
#
# cluster shape (1, 1, 1)
#
(ConvKind.Dgrad, 2, f16f16_f16f16_f16, tma_byte_alignments, ( 64, 64, 8), (1, 1, 1)),
(ConvKind.Dgrad, 2, f16f16_f16f16_f16, tma_byte_alignments, ( 64, 64, 16), (1, 1, 1)),
(ConvKind.Dgrad, 2, f16f16_f16f16_f16, tma_byte_alignments, ( 64, 128, 8), (1, 1, 1)),
(ConvKind.Dgrad, 2, f16f16_f16f16_f16, tma_byte_alignments, ( 64, 128, 16), (1, 1, 1)),
(ConvKind.Dgrad, 2, f16f16_f16f16_f16, tma_byte_alignments, ( 64, 256, 8), (1, 1, 1)),
(ConvKind.Dgrad, 2, f16f16_f16f16_f16, tma_byte_alignments, ( 64, 256, 16), (1, 1, 1)),
(ConvKind.Dgrad, 2, f16f16_f16f16_f16, tma_byte_alignments, (128, 128, 8), (1, 1, 1)),
(ConvKind.Dgrad, 2, f16f16_f16f16_f16, tma_byte_alignments, (128, 128, 16), (1, 1, 1)),
(ConvKind.Dgrad, 2, f16f16_f16f16_f16, tma_byte_alignments, (128, 256, 8), (1, 1, 1)),
(ConvKind.Dgrad, 2, f16f16_f16f16_f16, tma_byte_alignments, (128, 256, 16), (1, 1, 1)),
(ConvKind.Dgrad, 2, f16f16_f16f16_f16, tma_byte_alignments, (256, 64, 8), (1, 1, 1)),
(ConvKind.Dgrad, 2, f16f16_f16f16_f16, tma_byte_alignments, (256, 64, 16), (1, 1, 1)),
(ConvKind.Dgrad, 2, f16f16_f16f16_f16, tma_byte_alignments, (256, 128, 8), (1, 1, 1)),
(ConvKind.Dgrad, 2, f16f16_f16f16_f16, tma_byte_alignments, (256, 128, 16), (1, 1, 1)),
#
# f16f16_f16f32_f32
#
# cluster shape (2, 1, 1)
#
(ConvKind.Dgrad, 2, f16f16_f16f32_f32, tma_byte_alignments, (128, 256, 8), (2, 1, 1)),
(ConvKind.Dgrad, 2, f16f16_f16f32_f32, tma_byte_alignments, (128, 256, 16), (2, 1, 1)),
(ConvKind.Dgrad, 2, f16f16_f16f32_f32, tma_byte_alignments, (256, 128, 8), (2, 1, 1)),
(ConvKind.Dgrad, 2, f16f16_f16f32_f32, tma_byte_alignments, (256, 128, 16), (2, 1, 1)),
),
)
# SM >= 90 kernels don't actually use warp_count, but the
# TileDescription class needs it. The 4 in the default
# warp_count has nothing to do with num_mma_per_tile.
warp_count = [4, 1, 1]
stages = 0 # zero means "deduce the number of stages automatically"
mainloop_schedule = KernelScheduleType.ImplicitTmaWarpSpecializedSm90
epilogue_schedule = EpilogueScheduleType.TmaWarpSpecialized
schedule_pairs = (
(mainloop_schedule, epilogue_schedule),
)
tile_schedulers = (
TileSchedulerType.Default, # -> void
)
def make_math_instruction(data_types: Dict[str, DataType],
mma_shape: Tuple[int, int, int]) -> MathInstruction:
default_opcode = OpcodeClass.TensorOp
default_math_op = MathOperation.multiply_add
return MathInstruction(
mma_shape,
data_types['a_type'], data_types['b_type'], data_types['c_type'],
default_opcode,
default_math_op
)
for (conv_kind, spatial_dim, data_types, byte_alignments, mma_shape, cluster_shape) in combinations_of_parameters:
math_inst = make_math_instruction(data_types, mma_shape)
tile_shape = (mma_shape[0], mma_shape[1], num_mma_per_tile * mma_shape[2])
tile_description = TileDescription(tile_shape, stages, warp_count, math_inst,
minimum_compute_capability, maximum_compute_capability, cluster_shape)
assert(isinstance(spatial_dim, int))
assert(isinstance(byte_alignments, dict))
dims_and_alignments = (
(
(spatial_dim, byte_alignments['A']),
(spatial_dim, byte_alignments['B']),
(spatial_dim, byte_alignments['C']),
),
)
CreateConvOperator3x(manifest,
dims_and_alignments = dims_and_alignments,
tile_descriptions = [tile_description],
data_types = data_types,
schedule_pairs = schedule_pairs,
tile_schedulers = tile_schedulers,
conv_kind = conv_kind,
log_indent_level = log_indent_level)
def GenerateSM90(manifest, cuda_version):
GenerateSM90_TensorOp_16b_WGMMA_gemm(manifest, cuda_version)
GenerateSM90_TensorOp_16b_WGMMA_alignx_gemm(manifest, cuda_version)
GenerateSM90_TensorOp_tf32_WGMMA_gemm(manifest, cuda_version)
GenerateSM90_TensorOp_tf32_WGMMA_alignx_gemm(manifest, cuda_version)
GenerateSM90_TensorOp_int8_WGMMA_gemm(manifest, cuda_version)
GenerateSM90_TensorOp_int8_WGMMA_alignx_gemm(manifest, cuda_version)
GenerateSM90_TensorOp_fp8_WGMMA_gemm(manifest, cuda_version)
GenerateSM90_TensorOp_fp8_WGMMA_alignx_gemm(manifest, cuda_version)
GenerateSM90_TensorOp_1684(manifest, cuda_version)
GenerateSM90_TensorOp_1684_complex(manifest, cuda_version)
GenerateSM90_TensorOp_1684_complex_gaussian(manifest, cuda_version)
GenerateSM90_TensorOp_1684_rank_k(manifest, cuda_version)
GenerateSM90_TensorOp_1684_rank_k_complex(manifest, cuda_version)
GenerateSM90_TensorOp_1684_rank_k_complex_gaussian(manifest, cuda_version)
GenerateSM90_TensorOp_1684_trmm(manifest, cuda_version)
GenerateSM90_TensorOp_1684_trmm_complex(manifest, cuda_version)
GenerateSM90_TensorOp_1684_trmm_complex_gaussian(manifest, cuda_version)
GenerateSM90_TensorOp_1684_symm(manifest, cuda_version)
GenerateSM90_TensorOp_1684_symm_complex(manifest, cuda_version)
GenerateSM90_TensorOp_1684_symm_complex_gaussian(manifest, cuda_version)
GenerateSM90_Conv3x(manifest, cuda_version)
###################################################################################################
def numeric_log_level(log_level: str) -> int:
"""
Converts the string identifier of the log level
into the numeric identifier used in setting the log level.
  :param log_level: string representation of log level (e.g., 'INFO', 'DEBUG')
  :type log_level: str
:return: numeric representation of log level
:rtype: int
"""
numeric_level = getattr(logging, log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError(f'Invalid log level: {log_level}')
return numeric_level
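# For example (illustrative): numeric_log_level('debug') returns logging.DEBUG (10),
# while an unrecognized name such as 'chatty' raises ValueError.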
# The ArgumentParser is defined in its own function so that the CUTLASS Python interface
# can reuse the functionality in this file without invoking this script from a shell.
def define_parser():
parser = argparse.ArgumentParser(description="Generates device kernel registration code for CUTLASS Kernels")
parser.add_argument("--operations", default="all", help="Specifies the operation to generate (gemm, all)")
parser.add_argument("--build-dir", default=".", required=False, help="CUTLASS top-level build directory")
  parser.add_argument("--curr-build-dir", default=".", help="CUTLASS current build directory. CMake files will be emitted in this directory")
parser.add_argument("--generator-target", default='library', help="Target of CUTLASS Library Generator.")
parser.add_argument("--architectures", default='53;60;61;70;75;80;90', help="Target compute architectures")
parser.add_argument("--kernels", default='', help='Comma-delimited list to filter kernels by name. ' +
'Specifying this as \"all\" includes ALL the kernels, ' +
'while not specifying this includes only the default set of kernels.')
parser.add_argument("--ignore-kernels", default='', help='Comma-delimited list of kernels ' +
'to exclude from build. For backwards compatibility reasons, ' +
'this option only takes effect if --kernels is set to a nonempty value.')
parser.add_argument("--exclude-kernels", default='', help='Comma-delimited list of kernels ' +
'to exclude from build. In contrast to --ignore-kernels, ' +
'this option always takes effect, ' +
'whether or not --kernels is set to a nonempty value. ' +
'It also can exclude kernels from the filter file ' +
'(see --kernel-filter-file option below).')
parser.add_argument("--filter-by-cc", default='True', type=str, help='If enabled, kernels whose compute capability range is not satisfied by the build target are excluded.')
parser.add_argument("--cuda-version", default="11.0.0", help="Semantic version string of CUDA Toolkit")
parser.add_argument('--kernel-filter-file', type=str, default=None, required=False, help='Full path of filter file')
parser.add_argument('--selected-kernel-list', type=str, default=None, required=False,
help='Specify the output log file containing all enabled kernels in this build')
parser.add_argument("--interface-dir", default=None, required=False, help="Interface header to kernels")
  parser.add_argument("--disable-full-archs-compilation", action="store_true", required=False, help="Disable compilation for every arch in --architectures")
parser.add_argument("--log-level", default='info', type=numeric_log_level, required=False,
help='Logging level to be used by the generator script')
_add_package_disablement_flag(parser)
return parser
if __name__ == "__main__":
parser = define_parser()
args = parser.parse_args()
# Set the logging level based on the user-provided `--log-level` command-line option
logging.basicConfig(level=args.log_level)
manifest = Manifest(args)
GenerateSM50(manifest, args.cuda_version)
GenerateSM60(manifest, args.cuda_version)
GenerateSM61(manifest, args.cuda_version)
GenerateSM70(manifest, args.cuda_version)
GenerateSM75(manifest, args.cuda_version)
GenerateSM80(manifest, args.cuda_version)
GenerateSM89(manifest, args.cuda_version)
GenerateSM90(manifest, args.cuda_version)
if 'library' in args.generator_target.split(','):
manifest.emit(GeneratorTarget.Library)
if args.selected_kernel_list is not None:
if len(manifest.selected_kernels) > 0:
with open(args.selected_kernel_list, 'w') as file_writer:
for line in manifest.selected_kernels:
file_writer.write("%s\n" % line)
###################################################################################################
| python/cutlass_library/generator.py/0 | {"file_path": "python/cutlass_library/generator.py", "repo_id": "python", "token_count": 129997} | 40 |
Utilities
=========
Checks
------
.. automodule:: cutlass.utils.check
:members:
:undoc-members:
:show-inheritance:
Data Types
----------
.. automodule:: cutlass.utils.datatypes
:members:
:undoc-members:
:show-inheritance:
| python/docs_src/source/cutlass.utils.rst/0 | {"file_path": "python/docs_src/source/cutlass.utils.rst", "repo_id": "python", "token_count": 96} | 41 |
[metadata]
name = nvidia-cutlass
version = 3.4.0.0
[options]
packages =
cutlass
cutlass.backend
cutlass.backend.evt
cutlass.backend.evt.backend
cutlass.backend.evt.frontend
cutlass.backend.evt.ir
cutlass.backend.evt.passes
cutlass.backend.utils
cutlass.emit
cutlass.epilogue
cutlass.op
cutlass.utils
cutlass_library
cutlass_library.source
pycute
package_dir =
cutlass=python/cutlass
cutlass_library=python/cutlass_library
cutlass_library.source=.
pycute=python/pycute
include_package_data = True
[options.package_data]
cutlass_library.source = include/**/*, examples/**/*, tools/**/*
[options.exclude_package_data]
cutlass_library.source = include/**/*.py, examples/**/*.py, tools/**/*.py
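# Note (illustrative): the package_dir mapping above roots cutlass_library.source at the
# repository top level, which is how the include/, examples/, and tools/ trees listed in
# [options.package_data] ship with the package.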
| setup.cfg/0 | {"file_path": "setup.cfg", "repo_id": "setup.cfg", "token_count": 278} | 42 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Low-level functionality tests for GEMM with F16 operands on SM90
"""
from functools import partial
import logging
import unittest
import cutlass
from cutlass.backend.utils.device import device_cc
from utils import LayoutCombination, add_test_gemm
cutlass.set_log_level(logging.WARNING)
cc = 90
dtype = cutlass.DataType.f16
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM90 tests.')
@unittest.skipIf(cutlass.utils.datatypes.torch_type(dtype) is None, f'Version of torch installed does not contain a datatype match for {dtype}')
class GemmF16Sm90(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
add_test_specialized = partial(add_test_gemm, cls=GemmF16Sm90, element=dtype,
warp_count=None, compilation_modes=['nvcc'])
add_test_tensorop = partial(add_test_specialized, opclass=cutlass.OpcodeClass.TensorOp)
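# Each add_test_* call below registers one GEMM test case on GemmF16Sm90 via add_test_gemm;
# the functools.partial wrappers successively pin the parameters shared across tests
# (element type, opcode class, cluster shape) so each call only specifies what varies.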
# Tests with 1x1x1 clusters
add_test_unit_cluster = partial(add_test_tensorop, cluster_shape=[1, 1, 1])
add_test_unit_cluster(layouts=LayoutCombination.NNN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], stages=3)
add_test_unit_cluster(layouts=LayoutCombination.NNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], stages=None)
add_test_unit_cluster(layouts=LayoutCombination.NTN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], stages=None)
add_test_unit_cluster(layouts=LayoutCombination.NTT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], stages=None)
add_test_unit_cluster(layouts=LayoutCombination.TNN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], stages=None)
add_test_unit_cluster(layouts=LayoutCombination.TNT, alignments=[4, 4, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], stages=None)
add_test_unit_cluster(layouts=LayoutCombination.TNT, alignments=[4, 4, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 32], stages=None)
add_test_unit_cluster(layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 32], stages=None)
add_test_unit_cluster(layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 64], stages=5)
add_test_unit_cluster(layouts=LayoutCombination.TNT, alignments=[2, 2, 2], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 32], stages=None)
# Tests with different cluster shapes
add_test_cluster_shape = partial(add_test_tensorop, threadblock_shape=[64, 128, 64], stages=None)
add_test_cluster_shape(layouts=LayoutCombination.TTN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f16, cluster_shape=[2, 2, 1])
add_test_cluster_shape(layouts=LayoutCombination.TNN, alignments=[8, 8, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, cluster_shape=[2, 2, 1])
add_test_cluster_shape(layouts=LayoutCombination.NTN, alignments=[8, 8, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, cluster_shape=[2, 2, 1])
add_test_cluster_shape(layouts=LayoutCombination.NNN, alignments=[8, 8, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, cluster_shape=[2, 2, 1])
add_test_cluster_shape(layouts=LayoutCombination.TTN, alignments=[8, 8, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, cluster_shape=[1, 4, 1])
add_test_cluster_shape(layouts=LayoutCombination.TTN, alignments=[8, 8, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, cluster_shape=[2, 4, 1])
add_test_cluster_shape(layouts=LayoutCombination.TTN, alignments=[8, 8, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, cluster_shape=[4, 1, 1])
add_test_cluster_shape(layouts=LayoutCombination.TTN, alignments=[8, 8, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, cluster_shape=[4, 2, 1])
# Tests for different schedule modes
add_test_schedule = partial(add_test_specialized, layouts=LayoutCombination.TTN, alignments=[8, 8, 4],
element_output=cutlass.DataType.f32, element_accumulator=cutlass.DataType.f32,
opclass=cutlass.OpcodeClass.TensorOp, threadblock_shape=[128, 128, 64], stages=None)
add_test_schedule(
cluster_shape=[1, 1, 1],
kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedPingpong,
epilogue_schedule=cutlass.EpilogueScheduleType.TmaWarpSpecialized
)
add_test_schedule(
cluster_shape=[1, 1, 1],
kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedCooperative,
epilogue_schedule=cutlass.EpilogueScheduleType.TmaWarpSpecializedCooperative
)
add_test_schedule(
cluster_shape=[2, 1, 1],
kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedPingpong,
epilogue_schedule=cutlass.EpilogueScheduleType.TmaWarpSpecialized
)
add_test_schedule(
cluster_shape=[2, 1, 1],
kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedCooperative,
epilogue_schedule=cutlass.EpilogueScheduleType.TmaWarpSpecializedCooperative
)
# Tests using SIMT
add_test_simt = partial(add_test_specialized, opclass=cutlass.OpcodeClass.Simt, alignments=[1, 1, 1], cluster_shape=[1, 1, 1], stages=2)
add_test_simt(layouts=LayoutCombination.NNN, element_output=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 8])
add_test_simt(layouts=LayoutCombination.TNN, element_output=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 128, 8])
add_test_simt(layouts=LayoutCombination.NTN, element_output=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 64, 8])
add_test_simt(layouts=LayoutCombination.TTN, element_output=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 8])
add_test_simt(layouts=LayoutCombination.NNT, element_output=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 8])
# Tests with void-C kernels
add_test_cluster_shape(layouts=LayoutCombination.NNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], stages=None,
cluster_shape=[2, 1, 1], element_C=cutlass.DataType.void)
if __name__ == '__main__':
unittest.main()
| test/python/cutlass/gemm/gemm_f16_sm90.py/0 | {"file_path": "test/python/cutlass/gemm/gemm_f16_sm90.py", "repo_id": "test", "token_count": 3541} | 43 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <iomanip>
#include <utility>
#include <type_traits>
#include <vector>
#include <numeric>
#include <tuple>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include <cute/numeric/numeric_types.hpp>
using namespace cute;
namespace cooperative_copy_mode {
struct global_shared {};
struct global_global {};
struct shared_shared {};
}
// gs --> global to/from shared
template <int MaxVecBits, uint32_t ThreadBlockSize, class T, class GMemLayout, class SMemLayout>
__device__ void
cooperative_copy_default_gs(T const* g_in, T* g_out, GMemLayout const& gmem_layout, SMemLayout const& smem_layout)
{
using namespace cute;
extern __shared__ uint128_t smem_buf[];
// Cast smem_buf to a byte pointer and offset it by (MaxVecBits/8) bytes.
// This ensures the test also passes when the shared-memory pointer is aligned
// to exactly MaxVecBits bits rather than to the full 128-bit buffer alignment.
uint8_t* smem_uint8_ptr = reinterpret_cast<uint8_t*>(smem_buf) + (MaxVecBits/8);
T* smem = reinterpret_cast<T*>(smem_uint8_ptr);
Tensor g_in_tensor = make_tensor(make_gmem_ptr(g_in), gmem_layout);
Tensor g_out_tensor = make_tensor(make_gmem_ptr(g_out), gmem_layout);
Tensor s_tensor = make_tensor(make_smem_ptr(smem), smem_layout);
cooperative_copy<ThreadBlockSize, MaxVecBits>(threadIdx.x, g_in_tensor, s_tensor);
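// The fence/wait pair below is defensive: it covers the case (an assumption, depending on the
// CuTe version) where the global-to-shared cooperative_copy above is lowered to asynchronous
// cp.async transfers; for purely synchronous copies it only adds a harmless synchronization.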
cp_async_fence();
cp_async_wait<0>();
__syncthreads();
if(thread0()) {
for(int i = 0; i < size(s_tensor); ++i) {
s_tensor(i) += T(i);
}
}
__syncthreads();
cooperative_copy<ThreadBlockSize, MaxVecBits>(threadIdx.x, s_tensor, g_out_tensor);
}
// ss --> shared to shared
template <int MaxVecBits, uint32_t ThreadBlockSize, class T, class Layout1, class Layout2>
__device__ void
cooperative_copy_default_ss(T const* g_in, T* g_out, Layout1 const& layout1, Layout2 const& layout2)
{
using namespace cute;
extern __shared__ uint128_t smem_buf[];
// smem1 uses the 128-bit-aligned base of smem_buf; smem2 is offset by (MaxVecBits/8) bytes
// so the test also covers a pointer aligned to exactly MaxVecBits bits.
T* smem1 = reinterpret_cast<T*>(smem_buf);
uint8_t* smem2_uint8_ptr = reinterpret_cast<uint8_t*>(smem_buf) + (MaxVecBits/8);
T* smem2 = reinterpret_cast<T*>(smem2_uint8_ptr) + cute::cosize(layout2);
Tensor g_in_tensor = make_tensor(make_gmem_ptr(g_in), layout1);
Tensor g_out_tensor = make_tensor(make_gmem_ptr(g_out), layout2);
Tensor s1_tensor = make_tensor(make_smem_ptr(smem1), layout2);
Tensor s2_tensor = make_tensor(make_smem_ptr(smem2), layout1);
cooperative_copy<ThreadBlockSize, cute::sizeof_bits_v<T>>(threadIdx.x, g_in_tensor, s1_tensor);
cp_async_fence();
cp_async_wait<0>();
__syncthreads();
if(thread0()) {
for(int i = 0; i < size(s1_tensor); ++i) {
s1_tensor(i) += T(i);
}
}
__syncthreads();
cooperative_copy<ThreadBlockSize, MaxVecBits>(threadIdx.x, s1_tensor, s2_tensor);
__syncthreads();
cooperative_copy<ThreadBlockSize, cute::sizeof_bits_v<T>>(threadIdx.x, s2_tensor, g_out_tensor);
}
// gg --> global to global
template <int MaxVecBits, uint32_t ThreadBlockSize, class T, class Layout1, class Layout2>
__device__ void
cooperative_copy_default_gg(T const* g_in, T* g_out, Layout1 const& layout1, Layout2 const& layout2)
{
using namespace cute;
Tensor g_in_tensor = make_tensor(make_gmem_ptr(g_in), layout1);
Tensor g_out_tensor = make_tensor(make_gmem_ptr(g_out), layout2);
cooperative_copy<ThreadBlockSize, MaxVecBits>(threadIdx.x, g_in_tensor, g_out_tensor);
}
template <class Mode, int MaxVecBits, uint32_t ThreadBlockSize, class T, class Layout1, class Layout2>
__global__ void
cooperative_copy_default_kernel(T const* g_in, T* g_out, Layout1 const layout1, Layout2 const layout2)
{
if constexpr(std::is_same_v<Mode, cooperative_copy_mode::global_shared>) {
cooperative_copy_default_gs<MaxVecBits, ThreadBlockSize>(g_in, g_out, layout1, layout2);
} else if constexpr (std::is_same_v<Mode, cooperative_copy_mode::global_global>) {
cooperative_copy_default_gg<MaxVecBits, ThreadBlockSize>(g_in, g_out, layout1, layout2);
} else if constexpr (std::is_same_v<Mode, cooperative_copy_mode::shared_shared>) {
cooperative_copy_default_ss<MaxVecBits, ThreadBlockSize>(g_in, g_out, layout1, layout2);
}
}
// Mode - selects the memory spaces of src and dst in the cooperative_copy operation
// MaxVecBits - defines the maximum vectorization of the cooperative_copy operation; the testbed
// also enforces exactly that alignment on the pointers it uses so the limit is actually exercised
template <class Mode, int MaxVecBits, uint32_t ThreadBlockSize, class T, class Layout1, class Layout2>
void test_cooperative_copy_default(Layout1 const& layout1, Layout2 const& layout2)
{
using value_type = T;
CUTE_STATIC_ASSERT_V(cute::size(layout1) == cute::size(layout2));
auto gmem_layout_in = layout1;
auto gmem_layout_out = cute::conditional_return<std::is_same_v<Mode, cooperative_copy_mode::global_shared>>(layout1, layout2);
#if 0
print(" "); print("layout1: "); print(layout1); print("\n");
print(" "); print("layout2: "); print(layout2); print("\n");
print(" "); print("threads: "); print(ThreadBlockSize); print("\n");
print(" "); print("maxvecbits: "); print(MaxVecBits); print("\n");
#endif
if constexpr (MaxVecBits < cute::sizeof_bits_v<value_type>) {
GTEST_SKIP() << "Skipping test since MaxVecBits (=" << MaxVecBits
<< ") < cute::sizeof_bits_v<value_type> (=" << cute::sizeof_bits_v<value_type> << ")";
} else {
constexpr auto max_vec_bytes = MaxVecBits / 8;
static_assert((max_vec_bytes % sizeof(T)) == 0);
uint32_t count = cute::cosize(gmem_layout_in);
// Extra elements to force MaxVecBits alignment in global memory
uint32_t extra_elements = max_vec_bytes / sizeof(value_type);
// Allocate
thrust::host_vector<value_type> h_in (count + extra_elements);
thrust::host_vector<value_type> h_out(count + extra_elements);
// Initialize
Tensor h_in_tensor = make_tensor(h_in.data() + extra_elements, gmem_layout_in);
Tensor h_out_tensor = make_tensor(h_out.data() + extra_elements, gmem_layout_out);
for (int i = 0; i < cute::size(h_in_tensor); ++i) {
h_in_tensor(i) = value_type(float(i));
// For global-to-global copy need to compare against the same value
h_out_tensor(i) = std::is_same_v<Mode, cooperative_copy_mode::global_global> ? value_type(float(i)) : value_type(float(2 * i));
}
// To GPU
thrust::device_vector<value_type> d_in = h_in;
thrust::device_vector<value_type> d_out(d_in.size(), value_type(float(-2)));
// Adds (MaxVecBits/8) bytes to shared memory as we'll move pointer by that many bytes inside the kernel to enforce
// alignment to (MaxVecBits/8) bytes
size_t shared_memory_bytes = (sizeof(value_type) * count) + max_vec_bytes;
shared_memory_bytes += std::is_same_v<Mode, cooperative_copy_mode::shared_shared> * (sizeof(value_type) * count);
// Launch
auto coop_copy = cooperative_copy_default_kernel<Mode, MaxVecBits, ThreadBlockSize, value_type, Layout1, Layout2>;
ASSERT_EQ(cudaFuncSetAttribute(coop_copy, cudaFuncAttributeMaxDynamicSharedMemorySize, static_cast<int>(shared_memory_bytes)), cudaSuccess);
auto d_in_ptr = thrust::raw_pointer_cast(d_in.data() + extra_elements);
auto d_out_ptr = thrust::raw_pointer_cast(d_out.data() + extra_elements);
coop_copy<<<1, ThreadBlockSize, shared_memory_bytes>>>(d_in_ptr, d_out_ptr, layout1, layout2);
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
cudaError_t error = cudaGetLastError();
FAIL() << "Error at kernel sync: " << cudaGetErrorString(error) << "\n";
}
// Validate
thrust::host_vector<value_type> h_result = d_out;
Tensor h_result_tensor = make_tensor(h_result.data() + extra_elements, gmem_layout_out);
for (int i = 0; i < cute::size(h_in_tensor); ++i) {
ASSERT_EQ(h_result_tensor(i), h_out_tensor(i))
<< i << " - result:" << h_result_tensor(i) << " expected:" << h_out_tensor(i);
}
}
}
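// Illustration only (kept disabled): a standalone test case invoking the helper above directly,
// equivalent to one instantiation of the typed tests below. The 1-D shape, 128-bit vector width,
// and 64-thread block are arbitrary example choices, not values required by the helper.
#if 0
TEST(SM80_CuTe_Ampere, CooperativeCopyExampleDirect)
{
using value_type = float;
auto layout = make_layout(make_shape(Int<256>{})); // 256 contiguous floats
test_cooperative_copy_default<cooperative_copy_mode::global_shared,
/* MaxVecBits = */ 128,
/* ThreadBlockSize = */ 64,
value_type>(layout, layout);
}
#endif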
template<class T>
class SM80_CuTe_Ampere;
template<class Mode, class MaxVecBits>
class SM80_CuTe_Ampere<std::tuple<Mode, MaxVecBits>>: public testing::Test
{
public:
using mode = Mode;
static constexpr int max_vec_bits = MaxVecBits::value;
};
typedef testing::Types<
std::tuple<cooperative_copy_mode::global_shared, cute::Int<128>>,
std::tuple<cooperative_copy_mode::global_shared, cute::Int<64>>,
std::tuple<cooperative_copy_mode::global_shared, cute::Int<32>>,
std::tuple<cooperative_copy_mode::global_shared, cute::Int<16>>,
std::tuple<cooperative_copy_mode::global_global, cute::Int<128>>,
std::tuple<cooperative_copy_mode::global_global, cute::Int<64>>,
std::tuple<cooperative_copy_mode::global_global, cute::Int<32>>,
std::tuple<cooperative_copy_mode::global_global, cute::Int<16>>,
std::tuple<cooperative_copy_mode::shared_shared, cute::Int<128>>,
std::tuple<cooperative_copy_mode::shared_shared, cute::Int<64>>,
std::tuple<cooperative_copy_mode::shared_shared, cute::Int<32>>,
std::tuple<cooperative_copy_mode::shared_shared, cute::Int<16>>
> CooperativeCopyModeMaxVecBitsList;
TYPED_TEST_SUITE(SM80_CuTe_Ampere, CooperativeCopyModeMaxVecBitsList);
// Fast path
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefault1D)
{
using value_type = float;
constexpr uint32_t count = 512;
auto gmem_layout = make_layout(make_shape(Int<count>{}));
auto smem_layout = make_layout(make_shape(Int<count>{}));
constexpr uint32_t thread_block_size = 64;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefault1DFallback)
{
using value_type = float;
constexpr uint32_t count = 99;
auto gmem_layout = make_layout(make_shape(Int<count>{}));
auto smem_layout = make_layout(make_shape(Int<count>{}));
constexpr uint32_t thread_block_size = 128;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
// Fast path
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefault2D)
{
using value_type = float;
constexpr uint32_t x = 32;
constexpr uint32_t y = 32;
auto gmem_layout = make_layout(make_shape(Int<x>{}, Int<y>{}));
auto smem_layout = make_layout(make_shape(Int<x>{}, Int<y>{}));
constexpr uint32_t thread_block_size = 64;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
#if 0
// Fast path
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefault2DDynamicStrides)
{
using value_type = float;
constexpr uint32_t x = 32;
constexpr uint32_t y = 32;
auto gmem_layout = make_layout(make_shape(Int<x>{}, Int<y>{}), make_stride(1, x));
auto smem_layout = make_layout(make_shape(Int<x>{}, Int<y>{}), make_stride(1, x));
constexpr uint32_t thread_block_size = 64;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
// Fast path
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefault2DMixedStrides)
{
using value_type = float;
constexpr uint32_t x = 32;
constexpr uint32_t y = 32;
auto gmem_layout = make_layout(make_shape(Int<x>{}, Int<y>{}));
auto smem_layout = make_layout(make_shape(Int<x>{}, Int<y>{}), make_stride(1, x));
constexpr uint32_t thread_block_size = 64;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
#endif
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefault2DFallback)
{
using value_type = float;
constexpr uint32_t x = 37;
constexpr uint32_t y = 37;
auto gmem_layout = make_layout(make_shape(Int<x>{}, Int<y>{}));
auto smem_layout = make_layout(make_shape(Int<x>{}, Int<y>{}));
constexpr uint32_t thread_block_size = 64;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
// Fast Path
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefault2DCustomStride)
{
using value_type = float;
constexpr uint32_t x = 16;
constexpr uint32_t y = 16;
auto gmem_layout = make_layout(make_shape(Int<x>{}, Int<y>{}), make_stride(Int<y>{}, Int<1>{}));
auto smem_layout = make_layout(make_shape(Int<x>{}, Int<y>{}), make_stride(Int<1>{}, Int<x>{}));
constexpr uint32_t thread_block_size = 64;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
// Fast path
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefault3D)
{
using value_type = cute::half_t;
constexpr uint32_t x = 8;
constexpr uint32_t y = 8;
constexpr uint32_t z = 16;
auto gmem_layout = make_layout(make_shape(Int<x>{}, Int<y>{}, Int<z>{}));
auto smem_layout = make_layout(make_shape(Int<x>{}, Int<y>{}, Int<z>{}));
constexpr uint32_t thread_block_size = 64;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
// Fast path
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefault2Dto3D)
{
using value_type = double;
constexpr uint32_t x = 16;
constexpr uint32_t y = 16;
constexpr uint32_t z = 4;
auto gmem_layout = make_layout(make_shape(Int<x>{}, Int<y*z>{}));
auto smem_layout = make_layout(make_shape(Int<z>{}, Int<y>{}, Int<x>{}));
constexpr uint32_t thread_block_size = 64;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
// Fast path
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefaultCustom1)
{
using value_type = double;
auto gmem_layout = make_layout(
make_shape(Int<8>{}, make_shape(Int<2>{}, Int<2>{})),
make_stride(Int<2>{}, make_shape(Int<1>{}, Int<16>{}))
);
auto smem_layout = make_layout(
make_shape(Int<8>{}, Int<4>{}),
make_stride(Int<4>{}, Int<1>{})
);
constexpr uint32_t thread_block_size = 8;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
// Fast Path
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefaultCustom2)
{
using value_type = float;
auto gmem_layout = make_layout(
make_shape(make_shape(Int<4>{}, Int<2>{}), make_shape(Int<2>{}, Int<2>{})),
make_stride(make_shape(Int<4>{}, Int<1>{}), make_shape(Int<16>{}, Int<2>{}))
);
auto smem_layout = make_layout(
make_shape(make_shape(Int<2>{}, Int<2>{}, Int<2>{}), make_shape(Int<2>{}, Int<2>{})),
make_stride(make_shape(Int<16>{}, Int<4>{}, Int<1>{}), make_shape(Int<8>{}, Int<2>{}))
);
constexpr uint32_t thread_block_size = 16;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
// Fast Path
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefaultSwizzle1)
{
using value_type = float;
auto gmem_layout = Layout<Shape<_8, _64>, Stride<_64, _1>>{};
auto smem_layout = composition(Swizzle<3, 3, 3>{}, Layout<Shape<_8, _64>, Stride<_64, _1>>{});
constexpr uint32_t thread_block_size = 128;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
// Fast Path
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefaultSwizzle2)
{
using value_type = cute::half_t;
auto gmem_layout = make_layout(make_shape(Int<64>{}, Int<64>{}));
auto smem_atom_layout = composition(Swizzle<3, 2, 3>{}, Layout<Shape<_8, _32>, Stride<_32, _1>>{});
auto smem_layout = tile_to_shape(
smem_atom_layout,
make_shape(shape<0>(gmem_layout), shape<1>(gmem_layout))
);
constexpr uint32_t thread_block_size = 128;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
// Fast Path
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefaultSwizzle3)
{
using value_type = cute::half_t;
auto gmem_layout = make_layout(make_shape(Int<64>{}, Int<64>{}));
auto smem_atom_layout = composition(Swizzle<2, 4, 3>{}, Layout<Shape<_16, _64>, Stride<_64, _1>>{});
auto smem_layout = tile_to_shape(
smem_atom_layout,
make_shape(shape<0>(gmem_layout), shape<1>(gmem_layout))
);
constexpr uint32_t thread_block_size = 128;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
// Fast path
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefaultSwizzle4)
{
using value_type = cute::half_t;
auto gmem_atom_layout = composition(Swizzle<3, 2, 3>{}, Layout<Shape<_8, _32>, Stride<_32, _1>>{});
auto smem_layout = make_layout(make_shape(Int<64>{}, Int<64>{}));
auto gmem_layout = tile_to_shape(
gmem_atom_layout,
make_shape(shape<0>(smem_layout), shape<1>(smem_layout))
);
constexpr uint32_t thread_block_size = 128;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
// Needs coalescing for the fast path to apply
// OK if we enforce the slow path
// Problem: the condition used to select between the slow and fast paths is wrong
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefaultCoalesceToCompose)
{
constexpr int m = 96;
using value_type = cute::half_t;
auto gmem_layout = make_layout(make_shape(Int<m>{}, Int<m>{}), GenColMajor{});
auto smem_layout = make_layout(make_shape(Int<m>{}, Int<m>{}), GenColMajor{});
constexpr uint32_t thread_block_size = 128;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
// Fast path (default): OK
// Slow path (enforced): OK
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefaultSwizzle5)
{
constexpr int m = 64;
constexpr int n = 128;
using value_type = cute::half_t;
auto gmem_layout = make_layout(make_shape(Int<m>{}, Int<n>{}), GenColMajor{});
// auto smem_layout = make_layout(make_shape(Int<m>{}, Int<n>{}), GenColMajor{}));
auto smem_atom_layout =
composition(Swizzle<3,3,3>{},
Layout<Shape < _8,_64>,
Stride<_64, _1>>{});
auto smem_layout = tile_to_shape(
smem_atom_layout,
make_shape(shape<0>(gmem_layout), shape<1>(gmem_layout))
);
constexpr uint32_t thread_block_size = 128;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
// If the selection condition is not strict enough, this case ends up on the fast path
// This test requires checking whether CuTe can compose the layouts
// Fast path (default): fails
// Slow path (enforced): should take the vectorized naive path
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefaultSwizzleNaiveVectorizable)
{
constexpr int m = 192;
constexpr int n = 64;
using value_type = cute::half_t;
auto gmem_layout = make_layout(make_shape(Int<m>{}, Int<n>{}), GenColMajor{});
// auto smem_layout = make_layout(make_shape(Int<m>{}, Int<n>{}), GenColMajor{});
auto smem_atom_layout =
composition(Swizzle<3,3,3>{},
Layout<Shape <_64, _8>,
Stride< _1,_64>>{});
auto smem_layout = tile_to_shape(
smem_atom_layout,
shape(gmem_layout)
);
constexpr uint32_t thread_block_size = 128;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
// fast path: ok (chosen)
// slow path: ok
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefaultRowMajorSmall)
{
constexpr int m = 24;
constexpr int n = 8;
using value_type = cute::half_t;
auto gmem_layout = make_layout(make_shape(Int<m>{}, Int<n>{}), GenRowMajor{});
auto smem_layout = make_layout(make_shape(Int<m>{}, Int<n>{}), GenRowMajor{});
constexpr uint32_t thread_block_size = 64;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
// fast path: doesn't apply
// slow path: ok
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefaultSlowPath)
{
constexpr int m = 67;
constexpr int n = 67;
using value_type = cute::half_t;
auto gmem_layout = make_layout(make_shape(Int<m>{}, Int<n>{}), GenRowMajor{});
auto smem_layout = make_layout(make_shape(Int<m>{}, Int<n>{}), GenRowMajor{});
constexpr uint32_t thread_block_size = 64;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
// fast path: doesn't apply
// slow path: should vectorize
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopyDefaultSwizzleSlowPathVectorize)
{
constexpr int m = 68;
constexpr int n = 68;
using value_type = cute::half_t;
auto gmem_layout = make_layout(make_shape(Int<m>{}, Int<n>{}), GenRowMajor{});
auto smem_layout = make_layout(make_shape(Int<m>{}, Int<n>{}), GenRowMajor{});
constexpr uint32_t thread_block_size = 32;
test_cooperative_copy_default<typename TestFixture::mode,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
TYPED_TEST(SM80_CuTe_Ampere, CooperativeCopy48x48Swizzle)
{
constexpr int m = 48;
constexpr int n = 48;
using value_type = cute::half_t;
auto gmem_layout = make_layout(make_shape(Int<m>{}, Int<n>{}), GenRowMajor{});
auto smem_layout = composition(Swizzle<2,2,3>{},
Layout<Shape <Shape <_16, _3, Int<48>>>,
Stride<Stride< _1, Int<768>, _16>>>{});
constexpr uint32_t thread_block_size = 8 * 32;
test_cooperative_copy_default<cooperative_copy_mode::shared_shared,
TestFixture::max_vec_bits,
thread_block_size,
value_type>(gmem_layout, smem_layout);
}
| test/unit/cute/ampere/cooperative_copy.cu/0 | {
"file_path": "test/unit/cute/ampere/cooperative_copy.cu",
"repo_id": "test",
"token_count": 11510
} | 44 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for epilogues
*/
#pragma once
#include <fstream>
#include <cfenv>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/complex.h"
#include "cutlass/quaternion.h"
#include "cutlass/platform/platform.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace kernel {
template <typename Epilogue>
__global__ void epilogue_threadblock(
typename Epilogue::OutputTileIterator::Params params_D,
typename Epilogue::OutputTileIterator::Element *ptr_D,
typename Epilogue::OutputTileIterator::Params params_C,
typename Epilogue::OutputTileIterator::Element *ptr_C,
typename Epilogue::OutputOp::Params params_output_op,
cutlass::MatrixCoord problem_size,
cutlass::TensorRef<
typename Epilogue::WarpMmaOperator::ElementC,
typename Epilogue::WarpMmaOperator::LayoutC> accumulator_ref,
int epilogue_count = 1) {
__shared__ typename Epilogue::SharedStorage shared_storage;
int thread_idx = threadIdx.x;
int warp_idx = threadIdx.x / 32;
int lane_idx = threadIdx.x % 32;
//
// Construct the epilogue
//
// Tile iterator writing to output tile
typename Epilogue::OutputTileIterator iterator_D(
params_D,
ptr_D,
problem_size,
thread_idx
);
// Tile iterator reading the source tile
typename Epilogue::OutputTileIterator iterator_C(
params_C,
ptr_C,
problem_size,
thread_idx
);
// Epilogue operator
Epilogue epilogue(
shared_storage,
thread_idx,
warp_idx,
lane_idx);
//
// Initialize the accumulators
//
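// Map the linear warp index to a (warp_m, warp_n) coordinate within the threadblock tile;
// warps stacked along the K dimension (if any) alias the same M/N accumulator tile.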
int warp_mn = warp_idx % (Epilogue::WarpCount::kM * Epilogue::WarpCount::kN);
int warp_m = warp_mn % Epilogue::WarpCount::kM;
int warp_n = warp_mn / Epilogue::WarpCount::kM;
accumulator_ref.add_coord_offset({
warp_m * Epilogue::WarpMmaOperator::Shape::kM,
warp_n * Epilogue::WarpMmaOperator::Shape::kN});
typename Epilogue::WarpMmaOperator::IteratorC accumulator_iterator(accumulator_ref, lane_idx);
typename Epilogue::AccumulatorTile accumulators;
accumulators.clear();
accumulator_iterator.load(accumulators);
#if 0
// For debugging, enable this block of code to fill each accumulator element with its
// source thread ID.
CUTLASS_PRAGMA_UNROLL
for (size_t i = 0; i < accumulators.size(); ++i) {
typename Epilogue::WarpMmaOperator::ElementC x(threadIdx.x);
accumulators[i] = x;
}
__syncthreads();
#endif
//
// Perform the epilogue operation
//
typename Epilogue::OutputOp output_op(params_output_op);
// Place the epilogue in a loop
for (int iter = 0; iter < epilogue_count; ++iter) {
epilogue(output_op, iterator_D, accumulators, iterator_C);
}
}
} // namespace kernel
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Epilogue_
>
class EpilogueTestbed {
public:
using Epilogue = Epilogue_;
using ElementAccumulator = typename Epilogue::ElementAccumulator;
using ElementCompute = typename Epilogue::OutputOp::ElementCompute;
using ElementOutput = typename Epilogue::ElementOutput;
using OutputOpParams = typename Epilogue::OutputOp::Params;
public:
//
// Data members
//
cutlass::MatrixCoord quantized_size;
cutlass::HostTensor<ElementAccumulator, cutlass::layout::RowMajor> accumulator_tensor;
cutlass::HostTensor<ElementOutput, cutlass::layout::RowMajor> source_tensor;
cutlass::HostTensor<ElementOutput, cutlass::layout::RowMajor> output_tensor;
public:
//
// Methods
//
EpilogueTestbed():
quantized_size(Epilogue::Shape::kM, Epilogue::Shape::kN),
accumulator_tensor({Epilogue::Shape::kM, Epilogue::Shape::kN}),
source_tensor({Epilogue::Shape::kM, Epilogue::Shape::kN}),
output_tensor({Epilogue::Shape::kM, Epilogue::Shape::kN}) {
//
// Initialize problem space
//
uint64_t seed = 2019;
cutlass::reference::host::TensorFillRandomUniform(
accumulator_tensor.host_view(),
seed,
2,
-2,
0);
cutlass::reference::host::TensorFillRandomUniform(
source_tensor.host_view(),
seed + 2018,
2,
-2,
0);
}
bool run_all() {
double alpha_values[] = {1, 0, 2.25};
double beta_values[] = {0, 1, -1.25};
// Test runtime would explode if we tried to test every case exhaustively. Instead, this tests
// the full output tile and several smaller sizes to stress predication.
for (int m_idx = 0; m_idx < 3; ++m_idx) {
for (int n_idx = 0; n_idx < 3; ++n_idx) {
int m = quantized_size.row() - m_idx * 3;
int n = quantized_size.column() - n_idx * Epilogue::kElementsPerAccess;
for (double const &alpha : alpha_values) {
for (double const &beta : beta_values) {
bool passed = run({m, n}, {cutlass::from_real<ElementCompute>(alpha), cutlass::from_real<ElementCompute>(beta)});
if (!passed) {
return false;
}
}
}
}
}
return true;
}
/// Runs the test
bool run(
cutlass::MatrixCoord problem_size,
OutputOpParams output_params) {
//
// Initialize problem space
//
ElementOutput default_output = ElementOutput(-127);
cutlass::reference::host::TensorFill(output_tensor.host_view(), default_output);
accumulator_tensor.sync_device();
output_tensor.sync_device();
source_tensor.sync_device();
//
// Initialize epilogue parameters
//
typename Epilogue::OutputTileIterator::Params params_D(output_tensor.device_ref().layout());
typename Epilogue::OutputTileIterator::Params params_C(source_tensor.device_ref().layout());
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(Epilogue::WarpCount::kCount * 32, 1);
test::kernel::epilogue_threadblock<Epilogue><<< grid, block >>>(
params_D,
output_tensor.device_data(),
params_C,
source_tensor.device_data(),
output_params,
problem_size,
accumulator_tensor.device_view());
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "Kernel error: " << cudaGetErrorString(result) << std::endl;
return false;
}
//
// Verify results
//
output_tensor.sync_host();
int errors = 0;
int const kMaxErrors = 5;
for (int r = 0; errors < kMaxErrors && r < quantized_size.row(); ++r) {
for (int c = 0; errors < kMaxErrors && c < quantized_size.column(); ++c) {
cutlass::MatrixCoord coord{r, c};
ElementOutput got = output_tensor.at(coord);
ElementOutput expected;
if (coord.row() < problem_size.row() && coord.column() < problem_size.column()) {
ElementCompute intermediate =
output_params.alpha * ElementCompute(accumulator_tensor.at(coord)) +
output_params.beta * ElementCompute(source_tensor.at(coord));
if ((cutlass::platform::is_same<ElementOutput, cutlass::int4b_t>::value
|| cutlass::platform::is_same<ElementOutput, cutlass::uint4b_t>::value
|| std::numeric_limits<ElementOutput>::is_integer)
&& !std::numeric_limits<ElementCompute>::is_integer) {
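// Integer (including 4-bit) outputs computed from a floating-point intermediate are rounded
// to nearest to match the device-side numeric conversion.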
std::fesetround(FE_TONEAREST);
expected = ElementOutput(std::nearbyint(float(cutlass::real(intermediate))));
} else {
expected = ElementOutput(intermediate);
}
} else {
expected = default_output;
}
if (expected != got) {
using OutputIO = cutlass::ScalarIO<ElementOutput>;
EXPECT_TRUE(false)
<< "-------\n"
<< "Error - output element (" << coord << ") - expected: "
<< OutputIO(expected)
<< ", got: " << OutputIO(got)
<< ", accum: " << (accumulator_tensor.at(coord))
<< ", source: " << OutputIO(source_tensor.at(coord))
<< ", alpha: " << (output_params.alpha)
<< ", beta: " << (output_params.beta) << "\n";
++errors;
}
}
}
//
// Report results on error
//
if (errors) {
std::stringstream ss;
ss
<< "output_tensor_op_" << Epilogue::Shape::kM << "x" << Epilogue::Shape::kN << "_"
<< Epilogue::WarpTileIterator::WarpShape::kM << "x"
<< Epilogue::WarpTileIterator::WarpShape::kN
<< "_slice_" << Epilogue::WarpCount::kK << ".csv";
std::ofstream output_file(ss.str());
output_file << output_tensor.host_view();
}
return !errors;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/epilogue/threadblock/testbed.h/0 | {
"file_path": "test/unit/epilogue/threadblock/testbed.h",
"repo_id": "test",
"token_count": 4068
} | 45 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
Testbed for sparse operations, not to be released for CUDA 11.0 GA; the expected release is CUDA 11.1.
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/host_uncompress.h"
#include "testbed_utils.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
struct SparseTestbed {
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute;
static int const kSparse = Gemm::GemmKernel::kSparse;
static int const kMetaSizeInBits = Gemm::GemmKernel::kMetaSizeInBits;
static int const kMaxID2 = Gemm::GemmKernel::kMaxID2;
static int const kElementsPerElementE = Gemm::GemmKernel::kElementsPerElementE;
using ElementE = typename Gemm::GemmKernel::ElementE;
using LayoutE = cutlass::layout::RowMajor;
using ReorderedLayoutE = typename Gemm::GemmKernel::LayoutE;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
cutlass::Distribution::Kind init_E;
uint64_t seed;
cutlass::HostTensor<typename Gemm::ElementA, typename Gemm::LayoutA> tensor_A;
cutlass::HostTensor<typename Gemm::ElementA, typename Gemm::LayoutA> tensor_A_uncompressed;
cutlass::HostTensor<typename Gemm::ElementB, typename Gemm::LayoutB> tensor_B;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_C;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_D;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> reference_D;
cutlass::HostTensor<ElementE, LayoutE> tensor_E;
cutlass::HostTensor<ElementE, ReorderedLayoutE> tensor_E_reordered;
//
// Methods
//
SparseTestbed(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_E_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080)
: init_A(init_A_),
init_B(init_B_),
init_C(init_C_),
init_E(init_E_),
seed(seed_) {}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 1;
scope_min = -1;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::gemm::GemmCoord problem_size) {
//
// Allocate the GEMM workspace
//
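// Operand A is stored compressed: only k/kSparse of the k columns survive per row. Operand E
// holds the selection metadata, with kElementsPerElementE groups packed into each ElementE
// (kSparse == 2 for the usual 2:4 structured-sparsity case).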
tensor_A.resize(cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse));
tensor_A_uncompressed.resize(problem_size.mk());
tensor_B.resize(problem_size.kn());
tensor_C.resize(problem_size.mn());
tensor_D.resize(problem_size.mn());
reference_D.resize(problem_size.mn(), false);
tensor_E.resize(cutlass::make_Coord(
problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
tensor_E_reordered.resize(cutlass::make_Coord(
problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));
if (init_E == cutlass::Distribution::Uniform) {
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomSparseMeta(
tensor_E.host_view(), seed, kMetaSizeInBits);
} else if (init_E == cutlass::Distribution::Identity) {
uint32_t content = (kMaxID2 == 1) ? 0x44444444 : 0x4444;
cutlass::reference::host::TensorFill(tensor_E.host_view(),
(ElementE)(content));
} else {
EXPECT_TRUE(false);
}
cutlass::reorder_meta(tensor_E_reordered.host_ref(), tensor_E.host_ref(),
{problem_size.m(), problem_size.n(),
problem_size.k() / kSparse / kElementsPerElementE});
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
tensor_A.host_view().at({0, 0}) = typename Gemm::ElementA(1);
tensor_B.host_view().at({0, 0}) = typename Gemm::ElementB(1);
tensor_C.host_view().at({0, 0}) = typename Gemm::ElementC(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
tensor_E_reordered.sync_device();
}
/// Compares the device-computed result with the host reference and writes the operands and both results to a file if they differ
bool compare_reference(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
if (tensor_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
if (reference_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view());
EXPECT_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_Gemm_device_"
<< problem_size.m() << "x"
<< problem_size.n() << "x"
<< problem_size.k() << "_"
<< Gemm::ThreadblockShape::kM << "x"
<< Gemm::ThreadblockShape::kN << "x"
<< Gemm::ThreadblockShape::kK << "_"
<< Gemm::WarpShape::kM << "x"
<< Gemm::WarpShape::kN << "x"
<< Gemm::WarpShape::kK << ".txt";
std::ofstream file(fname.str());
file
<< "problem: " << problem_size
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << tensor_A.host_view()
<< "\nB =\n" << tensor_B.host_view()
<< "\nC =\n" << tensor_C.host_view()
<< "\nE =\n" << tensor_E.host_view()
<< "\n\nReference =\n" << reference_D.host_view()
<< "\nComputed =\n" << tensor_D.host_view();
}
return passed;
}
/// Verifies the device result against a host reference GEMM
bool verify(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
//
// Verify
//
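// Expand the compressed A and its metadata E back into a dense m-by-k operand so the dense
// host reference GEMM below can be reused for verification.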
cutlass::uncompress(tensor_A_uncompressed.host_ref(), tensor_A.host_ref(),
tensor_E.host_ref(), problem_size.m(), problem_size.k());
cutlass::reference::host::Gemm<
typename Gemm::ElementA, typename Gemm::LayoutA,
typename Gemm::ElementB, typename Gemm::LayoutB,
typename Gemm::ElementC, typename Gemm::LayoutC,
ElementCompute,
ElementAccumulator, typename Gemm::Operator>
reference_gemm;
reference_gemm(
problem_size,
alpha,
tensor_A_uncompressed.host_ref(),
tensor_B.host_ref(),
beta,
reference_D.host_ref(),
ElementAccumulator(0)
);
return compare_reference(problem_size, alpha, beta);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmCoord problem_size,
int split_k_slices = 1,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
this->initialize(problem_size);
//
// Initialize the GEMM operator
//
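// Argument order below (presumably following the universal-GEMM convention extended with E):
// mode, problem size, batch count (reused as split-k slices), epilogue scalars, the A/B/C/D/E
// pointers, five batch strides (unused zeros in this non-batched kGemm mode), and finally the
// leading dimensions of A, B, C, D, and the reordered E.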
typename Gemm::Arguments arguments{
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size,
split_k_slices,
{alpha, beta},
tensor_A.device_data(),
tensor_B.device_data(),
tensor_C.device_data(),
tensor_D.device_data(),
tensor_E_reordered.device_data(),
int64_t(),
int64_t(),
int64_t(),
int64_t(),
int64_t(),
tensor_A.layout().stride(0),
tensor_B.layout().stride(0),
tensor_C.layout().stride(0),
tensor_D.layout().stride(0),
tensor_E_reordered.layout().stride(0)
};
Gemm gemm_op;
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
// This failure is likely due to insufficient device capabilities. Waive the test.
if (status != cutlass::Status::kSuccess) {
return true;
}
//
// Run the GEMM
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, alpha, beta);
if (!passed) {
std::cout << "Error with split_k_slices = " << split_k_slices << ", alpha: " << alpha << ", beta: " << beta << ", m: " << problem_size.m() << ", n: " << problem_size.n() << ", k:" <<problem_size.k() << std::endl;
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
bool TestAllSparseGemm() {
bool passed = true;
int const kMinimumOperandElementSize =
std::min(
int(cutlass::sizeof_bits<typename Gemm::ElementA>::value),
int(cutlass::sizeof_bits<typename Gemm::ElementB>::value));
// The M dimension has to be a multiple of 32 (sparse float) or 16 (sparse int)
// because of the reordering of operand E
int const kAlignmentM = std::max(((sizeof(typename Gemm::ElementE) == 2) ? 32 : 16),
kMinimumOperandElementSize);
int const kAlignmentN = 128 / kMinimumOperandElementSize;
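// For example, with 16-bit operands and 16-bit metadata this yields kAlignmentM == 32 and
// kAlignmentN == 8.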
int problem_size_m[] = {kAlignmentM, 512 - 3 * kAlignmentM};
int problem_size_n[] = {kAlignmentN, 512 - 2 * kAlignmentN};
int problem_size_k[] = {Gemm::ThreadblockShape::kK * 8};
int split_k_slices[] = {
1, 2
};
double problem_alpha[] = {
1
};
double problem_beta[] = {
2.0
};
SparseTestbed<Gemm> testbed;
using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute;
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
for (int split_k : split_k_slices) {
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
cutlass::gemm::GemmCoord problem_size(m, n, k);
passed = testbed.run(
problem_size,
split_k,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
if (!passed) {
return false;
}
}
}
}
}
}
}
return passed;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_sparse.h/0 | {
"file_path": "test/unit/gemm/device/testbed_sparse.h",
"repo_id": "test",
"token_count": 6385
} | 46 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit test for the OrderedSequenceBarrier class
*/
#include "../common/cutlass_unit_test.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include <cute/arch/cluster_sm90.hpp>
#include <cutlass/util/reference/host/gemm.h>
#include <cutlass/cluster_launch.hpp>
#include "cutlass/core_io.h"
#include "cutlass/util/print_error.hpp"
#include "cutlass/util/GPU_Clock.hpp"
#include "testbed.h"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/arch/barrier.h"
#include "cute/arch/cluster_sm90.hpp"
using namespace cute;
//////////////////// KERNEL /////////////////////////
template<typename OrderedSequencer>
struct SharedStorage
{
typename OrderedSequencer::SharedStorage storage;
};
// The goal of this kernel is to complete without deadlocking
template<int Stages, int GroupCount, int ThreadsPerGroup>
__global__ static
void ordered_sequence_device(uint32_t const num_iterations)
{
extern __shared__ char shared_memory[];
using SequenceBarrier = typename cutlass::OrderedSequenceBarrier<Stages, GroupCount>;
using SmemStorage = SharedStorage<SequenceBarrier>;
SmemStorage& shared_storage = *reinterpret_cast<SmemStorage*>(shared_memory);
int group_idx = threadIdx.x / ThreadsPerGroup;
typename SequenceBarrier::Params params;
params.group_id = group_idx; // sequence ID
params.group_size = ThreadsPerGroup; // Number of threads / participants in a group
SequenceBarrier barrier(shared_storage.storage, params);
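// Each of the GroupCount groups takes its turn in order for every stage: wait() blocks until
// the preceding group has arrived (group 0 is released first), the group does its stage work,
// and arrive() hands the turn to the next group, so the loop below completes without deadlock.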
// Ensure All CTAs in Cluster have completed init before issuing commits
__syncthreads();
cute::cluster_arrive_relaxed();
cute::cluster_wait();
CUTLASS_PRAGMA_NO_UNROLL
for (int i = 0; i < num_iterations; ++i){
barrier.wait();
// STAGE 1 CODE...
#ifndef NDEBUG
int thread_idx_in_group = threadIdx.x % ThreadsPerGroup;
if (thread_idx_in_group == 0) {
printf("STAGE 0 : Group_IDX : %d, id = %d, iter = %d, tidx = %d\n", group_idx, params.group_id, i, threadIdx.x);
}
#endif
// Simulates long running stage
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)
__nanosleep(100000);
#endif
barrier.arrive();
barrier.wait();
// STAGE 2 CODE...
#ifndef NDEBUG
if (thread_idx_in_group == 0) {
printf("STAGE 1 : Group_IDX : %d, id = %d, iter = %d, tidx = %d\n", group_idx, params.group_id, i, threadIdx.x);
}
#endif
// Simulates long running stage
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)
__nanosleep(100000);
#endif
barrier.arrive();
}
// To make sure remote SMEM doesn't get destroyed
cute::cluster_arrive();
cute::cluster_wait();
}
/////////////////////////////////////////////////////
template<uint32_t Stages_, uint32_t GroupCount_>
struct PipelineTest {
//
// Data members
//
static constexpr uint32_t ThreadsPerGroup = 128;
static constexpr uint32_t BlockSize = GroupCount_ * ThreadsPerGroup;
static constexpr uint32_t Stages = Stages_;
static constexpr uint32_t GroupCount = GroupCount_;
using SequenceBarrier = typename cutlass::OrderedSequenceBarrier<Stages, GroupCount>;
using SmemStorage = SharedStorage<SequenceBarrier>;
//
// Methods
//
// Launch the ordered-sequence barrier kernel
cudaError_t run(uint32_t const kNumIters,
cudaStream_t stream = nullptr) {
// Cluster shape (a single 1x1x1 cluster for this test)
auto cluster_shape = Shape<_1, _1, _1>{};
//
// Configure and launch
//
int iterations = 1;
cudaError_t result;
for (int iter = 0; iter < iterations; ++iter) {
int smem_size = int(sizeof(SmemStorage));
result = cudaFuncSetAttribute(
ordered_sequence_device<Stages, GroupCount, ThreadsPerGroup>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
// Launch a single cluster with BlockSize (= GroupCount * ThreadsPerGroup) threads per CTA
dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), size<2>(cluster_shape));
dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1);
dim3 dimBlock(BlockSize,1,1);
const void* kernel = (const void*)ordered_sequence_device<Stages, GroupCount, ThreadsPerGroup>;
int iters = kNumIters;
void* kernel_params[] = {reinterpret_cast<void*>(&iters)};
cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params);
} // profiling loop ends
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl;
return result;
}
return cudaSuccess;
}
};
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
TEST(SM90_Verify_OrderedSequence, Depth_2_Length_2) {
Options options;
static constexpr uint32_t GroupCount = 2;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, GroupCount>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_OrderedSequence, Depth_2_Length_3) {
Options options;
static constexpr uint32_t GroupCount = 3;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, GroupCount>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_OrderedSequence, Depth_2_Length_4) {
Options options;
static constexpr uint32_t GroupCount = 4;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, GroupCount>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_OrderedSequence, Depth_2_Length_5) {
Options options;
static constexpr uint32_t GroupCount = 5;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, GroupCount>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
#endif
| test/unit/pipeline/sequence_barrier.cu/0 | {
"file_path": "test/unit/pipeline/sequence_barrier.cu",
"repo_id": "test",
"token_count": 2644
} | 47 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
\file
\brief Defines a data structure from which a set of functionally equivalent library::Operation
instances may be queried.
*/
#pragma once
#include <fstream>
#include <iosfwd>
#include <unordered_map>
#include <algorithm>
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "cutlass/library/util.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Data Structures for Gemm Functional Maps
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tuple uniquely identifying Gemm functional behavior
struct GemmFunctionalKey {
Provider provider;
GemmKind gemm_kind;
NumericTypeID element_compute;
NumericTypeID element_scalar;
NumericTypeID element_A;
LayoutTypeID layout_A;
ComplexTransform transform_A;
NumericTypeID element_B;
LayoutTypeID layout_B;
ComplexTransform transform_B;
NumericTypeID element_C;
LayoutTypeID layout_C;
NumericTypeID element_D;
LayoutTypeID layout_D;
//
// Methods
//
inline
GemmFunctionalKey(
Provider provider,
GemmKind gemm_kind = GemmKind::kGemm,
NumericTypeID element_compute = NumericTypeID::kF32,
NumericTypeID element_scalar = NumericTypeID::kF32,
NumericTypeID element_A = NumericTypeID::kF16,
LayoutTypeID layout_A = LayoutTypeID::kColumnMajor,
ComplexTransform transform_A = ComplexTransform::kNone,
NumericTypeID element_B = NumericTypeID::kF16,
LayoutTypeID layout_B = LayoutTypeID::kColumnMajor,
ComplexTransform transform_B = ComplexTransform::kNone,
NumericTypeID element_C = NumericTypeID::kF16,
LayoutTypeID layout_C = LayoutTypeID::kColumnMajor,
NumericTypeID element_D = NumericTypeID::kF16,
LayoutTypeID layout_D = LayoutTypeID::kColumnMajor
):
provider(provider),
gemm_kind(gemm_kind),
element_compute(element_compute),
element_scalar(element_scalar),
element_A(element_A),
layout_A(layout_A),
transform_A(transform_A),
element_B(element_B),
layout_B(layout_B),
transform_B(transform_B),
element_C(element_C),
layout_C(layout_C),
element_D(element_D),
layout_D(layout_D)
{ }
inline
bool operator==(GemmFunctionalKey const &rhs) const {
return
(provider == rhs.provider) &&
(gemm_kind == rhs.gemm_kind) &&
(element_compute == rhs.element_compute) &&
(element_scalar == rhs.element_scalar) &&
(element_A == rhs.element_A) &&
(layout_A == rhs.layout_A) &&
(transform_A == rhs.transform_A) &&
(element_B == rhs.element_B) &&
(layout_B == rhs.layout_B) &&
(transform_B == rhs.transform_B) &&
(element_C == rhs.element_C) &&
(layout_C == rhs.layout_C) &&
(element_D == rhs.element_D) &&
(layout_D == rhs.layout_D);
}
inline
bool operator!=(GemmFunctionalKey const &rhs) const {
return !(*this == rhs);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
inline
std::ostream & operator<<(std::ostream &out, cutlass::library::GemmFunctionalKey const &k) {
out << "{\n"
<< " provider: " << to_string(k.provider) << "\n"
<< " gemm_kind: " << to_string(k.gemm_kind) << "\n"
<< " element_compute: " << to_string(k.element_compute) << "\n"
<< " element_scalar: " << to_string(k.element_scalar) << "\n"
<< " element_A: " << to_string(k.element_A) << "\n"
<< " layout_A: " << to_string(k.layout_A) << "\n"
<< " transform_A: " << to_string(k.transform_A) << "\n"
<< " element_B: " << to_string(k.element_B) << "\n"
<< " layout_B: " << to_string(k.layout_B) << "\n"
<< " transform_B: " << to_string(k.transform_B) << "\n"
<< " element_C: " << to_string(k.element_C) << "\n"
<< " layout_C: " << to_string(k.layout_C) << "\n"
<< " element_D: " << to_string(k.element_D) << "\n"
<< " layout_D: " << to_string(k.layout_D) << "\n"
<< "}";
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Hash function for GemmFunctionalKey
struct GemmFunctionalKeyHasher {
using IntHash = std::hash<int>;
inline
static size_t rotl(size_t key, int shl) {
return (key << shl) | (key >> (sizeof(key)*8u - static_cast<size_t>(shl)));
}
inline
size_t operator()(GemmFunctionalKey const &key) const {
IntHash hash;
return
rotl(hash(int(key.provider)), 1) ^
rotl(hash(int(key.gemm_kind)), 2) ^
rotl(hash(int(key.element_compute)), 3) ^
rotl(hash(int(key.element_scalar)), 4) ^
rotl(hash(int(key.element_A)), 5) ^
rotl(hash(int(key.layout_A)), 6) ^
rotl(hash(int(key.transform_A)), 7) ^
rotl(hash(int(key.element_B)), 8) ^
rotl(hash(int(key.layout_B)), 9) ^
rotl(hash(int(key.transform_B)), 10) ^
rotl(hash(int(key.element_C)), 11) ^
rotl(hash(int(key.layout_C)), 12) ^
rotl(hash(int(key.element_D)), 13) ^
rotl(hash(int(key.layout_D)), 14);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Establishes a partial ordering to search for GEMM operators
struct GemmPreferenceKey {
int compute_capability;
int alignment;
//
// Methods
//
GemmPreferenceKey(): compute_capability(), alignment() { }
GemmPreferenceKey(int cc, int alignment): compute_capability(cc), alignment(alignment) { }
bool operator<(GemmPreferenceKey const &rhs) const {
return (compute_capability < rhs.compute_capability) ||
((compute_capability == rhs.compute_capability) && (alignment < rhs.alignment));
}
bool operator==(GemmPreferenceKey const &rhs) const {
return compute_capability == rhs.compute_capability;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
inline
std::ostream& operator<< (std::ostream& out, const cutlass::library::GemmPreferenceKey& key) {
out << "{\n"
<< "compute_capability : " << key.compute_capability << std::endl
<< "alignment : " << key.alignment << std::endl
<< "}";
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Maps minimum compute capability onto a vector of possible operations
using GemmOperationVectorMap = std::map<
GemmPreferenceKey,
std::vector<Operation const *>
>;
/// Maps a GemmFunctionalKey onto a vector of Operation * objects expected to be of kind kGemm
using GemmOperationFunctionalMap = std::unordered_map<
GemmFunctionalKey,
GemmOperationVectorMap,
GemmFunctionalKeyHasher
>;
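// Illustrative sketch (not part of the library): one way a populated
// GemmOperationFunctionalMap might be queried. The map instance and the key
// values chosen below are assumptions for illustration only.
#if 0
inline void example_gemm_lookup(GemmOperationFunctionalMap const &gemm_operations) {
  // Functional key: F16 A/B/C/D operands in column-major layout with F32 compute.
  GemmFunctionalKey key(
    Provider::kCUTLASS,
    GemmKind::kGemm,
    NumericTypeID::kF32,           // element_compute
    NumericTypeID::kF32,           // element_scalar
    NumericTypeID::kF16,           // element_A
    LayoutTypeID::kColumnMajor,    // layout_A
    ComplexTransform::kNone,       // transform_A
    NumericTypeID::kF16,           // element_B
    LayoutTypeID::kColumnMajor,    // layout_B
    ComplexTransform::kNone,       // transform_B
    NumericTypeID::kF16,           // element_C
    LayoutTypeID::kColumnMajor,    // layout_C
    NumericTypeID::kF16,           // element_D
    LayoutTypeID::kColumnMajor);   // layout_D

  auto functional_it = gemm_operations.find(key);
  if (functional_it != gemm_operations.end()) {
    // The inner map is ordered by GemmPreferenceKey (compute capability, alignment);
    // each entry holds the functionally equivalent kernels for that preference.
    for (auto const &preference_entry : functional_it->second) {
      for (Operation const *op : preference_entry.second) {
        (void)op; // e.g. inspect op->description()
      }
    }
  }
}
#endif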
/////////////////////////////////////////////////////////////////////////////////////////////////
// Data Structures for Conv Functional Maps
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tuple uniquely identifying conv2d functional behavior
struct ConvFunctionalKey {
library::Provider provider;
library::ConvKind conv_kind;
library::NumericTypeID element_A;
library::LayoutTypeID layout_A;
library::NumericTypeID element_B;
library::LayoutTypeID layout_B;
library::NumericTypeID element_C;
library::LayoutTypeID layout_C;
library::NumericTypeID element_accumulator;
library::NumericTypeID element_compute;
//
// Methods
//
inline
ConvFunctionalKey(
library::Provider provider = library::Provider::kInvalid,
library::ConvKind conv_kind = library::ConvKind::kFprop,
library::NumericTypeID element_A = library::NumericTypeID::kF16,
library::LayoutTypeID layout_A = library::LayoutTypeID::kTensorNHWC,
library::NumericTypeID element_B = library::NumericTypeID::kF16,
library::LayoutTypeID layout_B = library::LayoutTypeID::kTensorNHWC,
library::NumericTypeID element_C = library::NumericTypeID::kF16,
library::LayoutTypeID layout_C = library::LayoutTypeID::kTensorNHWC,
library::NumericTypeID element_accumulator = library::NumericTypeID::kF32,
library::NumericTypeID element_compute = library::NumericTypeID::kF32
):
provider(provider),
conv_kind(conv_kind),
element_A(element_A),
layout_A(layout_A),
element_B(element_B),
layout_B(layout_B),
element_C(element_C),
layout_C(layout_C),
element_accumulator(element_accumulator),
element_compute(element_compute)
{ }
inline
bool operator==(ConvFunctionalKey const &rhs) const {
return
(provider == rhs.provider) &&
(conv_kind == rhs.conv_kind) &&
(element_A == rhs.element_A) &&
(layout_A == rhs.layout_A) &&
(element_B == rhs.element_B) &&
(layout_B == rhs.layout_B) &&
(element_C == rhs.element_C) &&
(layout_C == rhs.layout_C) &&
(element_accumulator == rhs.element_accumulator) &&
(element_compute == rhs.element_compute);
}
inline
bool operator!=(ConvFunctionalKey const &rhs) const {
return !(*this == rhs);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
inline
std::ostream& operator<< (std::ostream& out, const cutlass::library::ConvFunctionalKey& key) {
out << "{\n"
<< "provider: " << to_string(key.provider) << std::endl
<< "conv_kind: " << to_string(key.conv_kind) << std::endl
<< "element_A: " << to_string(key.element_A) << std::endl
<< "layout_A: " << to_string(key.layout_A) << std::endl
<< "element_B: " << to_string(key.element_B) << std::endl
<< "layout_B: " << to_string(key.layout_B) << std::endl
<< "element_C: " << to_string(key.element_C) << std::endl
<< "layout_C: " << to_string(key.layout_C) << std::endl
<< "element_accumulator: " << to_string(key.element_accumulator) << std::endl
<< "element_compute: " << to_string(key.element_compute) << std::endl
<< "}";
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
struct ConvFunctionalKeyHasher {
using IntHash = std::hash<int>;
inline
static size_t rotl(size_t key, int shl) {
return (key << shl) | (key >> (sizeof(key)*8u - static_cast<size_t>(shl)));
}
inline
size_t operator()(ConvFunctionalKey const &key) const {
IntHash hash;
return
rotl(hash(int(key.provider)), 1) ^
rotl(hash(int(key.conv_kind)), 2) ^
rotl(hash(int(key.element_A)), 3) ^
rotl(hash(int(key.layout_A)), 4) ^
rotl(hash(int(key.element_B)), 5) ^
rotl(hash(int(key.layout_B)), 6) ^
rotl(hash(int(key.element_C)), 7) ^
rotl(hash(int(key.layout_C)), 8) ^
rotl(hash(int(key.element_accumulator)), 9) ^
rotl(hash(int(key.element_compute)), 10);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Establishes a partial ordering to search for Conv2d operators
struct ConvPreferenceKey {
int compute_capability;
IteratorAlgorithmID iterator_algorithm;
//
// Methods
//
ConvPreferenceKey(): compute_capability(), iterator_algorithm() { }
ConvPreferenceKey(int cc, IteratorAlgorithmID iterator_algorithm):
compute_capability(cc), iterator_algorithm(iterator_algorithm) { }
bool operator<(ConvPreferenceKey const &rhs) const {
return (compute_capability < rhs.compute_capability) ||
((compute_capability == rhs.compute_capability) && (iterator_algorithm < rhs.iterator_algorithm));
}
bool operator==(ConvPreferenceKey const &rhs) const {
return (compute_capability == rhs.compute_capability) &&
(iterator_algorithm == rhs.iterator_algorithm);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Maps minimum compute capability onto a vector of possible operations
using ConvOperationVectorMap = std::map<
ConvPreferenceKey,
std::vector<Operation const *>
>;
/// Maps a ConvFunctionalKey onto a vector of Operation * objects expected to be of kind kConv2d or kConv3d
using ConvOperationFunctionalMap = std::unordered_map<
ConvFunctionalKey,
ConvOperationVectorMap,
ConvFunctionalKeyHasher
>;
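// Illustrative sketch (not part of the library): filtering conv2d candidates by
// compute capability. ConvOperationVectorMap is an ordered std::map over
// ConvPreferenceKey, so its entries can be walked in ascending order. The function
// and parameter names below are assumptions for illustration only.
#if 0
inline std::vector<Operation const *> example_conv2d_candidates(
  ConvOperationFunctionalMap const &conv2d_operations,
  ConvFunctionalKey const &key,
  int device_compute_capability) {

  std::vector<Operation const *> candidates;

  auto functional_it = conv2d_operations.find(key);
  if (functional_it == conv2d_operations.end()) {
    return candidates;
  }

  // Keep every operation whose minimum compute capability is satisfied by the device,
  // regardless of iterator algorithm.
  for (auto const &preference_entry : functional_it->second) {
    if (preference_entry.first.compute_capability <= device_compute_capability) {
      candidates.insert(
        candidates.end(), preference_entry.second.begin(), preference_entry.second.end());
    }
  }
  return candidates;
}
#endif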
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tuple uniquely identifying reduction functional behavior
struct ReductionFunctionalKey {
library::Provider provider;
library::NumericTypeID element_workspace;
library::NumericTypeID element_accumulator;
library::NumericTypeID element_output;
library::NumericTypeID element_compute;
library::MathOperationID reduce_math_op;
library::EpilogueKind epilogue_math_op;
//
// Methods
//
inline
ReductionFunctionalKey(
library::Provider provider = library::Provider::kInvalid,
library::NumericTypeID element_workspace = library::NumericTypeID::kF16,
library::NumericTypeID element_accumulator = library::NumericTypeID::kF32,
library::NumericTypeID element_output = library::NumericTypeID::kF16,
library::NumericTypeID element_compute = library::NumericTypeID::kF32,
library::MathOperationID reduce_math_op = library::MathOperationID::kAdd,
library::EpilogueKind epilogue_math_op = library::EpilogueKind::kLinearCombination
):
provider(provider),
element_workspace(element_workspace),
element_accumulator(element_accumulator),
element_output(element_output),
element_compute(element_compute),
reduce_math_op(reduce_math_op),
epilogue_math_op(epilogue_math_op)
{ }
inline
bool operator==(ReductionFunctionalKey const &rhs) const {
return
(provider == rhs.provider) &&
(element_workspace == rhs.element_workspace) &&
(element_accumulator == rhs.element_accumulator) &&
(element_output == rhs.element_output) &&
(element_compute == rhs.element_compute) &&
(reduce_math_op == rhs.reduce_math_op) &&
(epilogue_math_op == rhs.epilogue_math_op);
}
inline
bool operator!=(ReductionFunctionalKey const &rhs) const {
return !(*this == rhs);
}
};
struct ReductionFunctionalKeyHasher {
using IntHash = std::hash<int>;
inline
static size_t rotl(size_t key, int shl) {
return (key << shl) | (key >> (sizeof(key)*8u - static_cast<size_t>(shl)));
}
inline
size_t operator()(ReductionFunctionalKey const &key) const {
IntHash hash;
return
rotl(hash(int(key.provider)), 1) ^
rotl(hash(int(key.element_workspace)), 2) ^
rotl(hash(int(key.element_accumulator)), 3) ^
rotl(hash(int(key.element_output)), 4) ^
rotl(hash(int(key.element_compute)), 5) ^
rotl(hash(int(key.reduce_math_op)), 6) ^
rotl(hash(int(key.epilogue_math_op)), 7);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
inline
std::ostream& operator<< (std::ostream& out, const ReductionFunctionalKey& key) {
out << "{\n"
<< "provider: " << library::to_string(key.provider) << std::endl
<< "element_workspace : " << library::to_string(key.element_workspace) << std::endl
<< "element_accumulator : " << library::to_string(key.element_accumulator) << std::endl
<< "element_output : " << library::to_string(key.element_output) << std::endl
<< "element_compute : " << library::to_string(key.element_compute) << std::endl
<< "}";
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// ReductionOperationFunctionalMap has NO preference key and a single instance per functional key
// i.e. only one tile size configuration per functional key
using ReductionOperationFunctionalMap = std::unordered_map<
ReductionFunctionalKey,
library::Operation const *,
ReductionFunctionalKeyHasher
>;
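// Illustrative sketch (not part of the library): because the reduction table keeps a
// single instance per functional key, a lookup yields the operation directly. The key
// values below are assumptions for illustration only.
#if 0
inline Operation const *example_reduction_lookup(
  ReductionOperationFunctionalMap const &reduction_operations) {

  ReductionFunctionalKey key(
    library::Provider::kCUTLASS,
    library::NumericTypeID::kF32,               // element_workspace
    library::NumericTypeID::kF32,               // element_accumulator
    library::NumericTypeID::kF16,               // element_output
    library::NumericTypeID::kF32,               // element_compute
    library::MathOperationID::kAdd,             // reduce_math_op
    library::EpilogueKind::kLinearCombination); // epilogue_math_op

  auto it = reduction_operations.find(key);
  return (it != reduction_operations.end()) ? it->second : nullptr;
}
#endif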
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Table of cutlass::library::Operation instances
class OperationTable {
public:
/// Map of all operations of type kGemm
// provider (kCUTLASS)
GemmOperationFunctionalMap gemm_operations;
/// Map of all operations of type kConv2d
// provider (kCUTLASS, kReferenceHost, kReferenceDevice)
ConvOperationFunctionalMap conv2d_operations;
/// Map of all operations of type kConv3d
// provider (kCUTLASS, kReferenceHost, kReferenceDevice)
ConvOperationFunctionalMap conv3d_operations;
/// Map of all operations of type kReduction
// provider (kCUTLASS)
ReductionOperationFunctionalMap reduction_operations;
public:
void append(Manifest const &manifest);
};
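// Illustrative sketch (not part of the library): an OperationTable is typically
// populated from a Manifest and then queried through its per-kind maps. The manifest
// instance below is an assumption for illustration only.
#if 0
inline void example_build_operation_table(Manifest const &manifest) {
  OperationTable table;
  table.append(manifest);

  // table.gemm_operations, table.conv2d_operations, table.conv3d_operations, and
  // table.reduction_operations may now be searched by their functional keys.
}
#endif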
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
std::ostream & operator<<(std::ostream &out, cutlass::library::GemmFunctionalKey const &k);
| tools/library/include/cutlass/library/operation_table.h/0 | {
"file_path": "tools/library/include/cutlass/library/operation_table.h",
"repo_id": "tools",
"token_count": 6529
} | 48 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines operations for reduction operation in CUTLASS Library.
*/
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "reduction_operation.h"
namespace cutlass {
namespace library {
// naming convention initialize_reduce_[ReductionOp]_[EpilogueOp]_[ElementWorkspace]_[ElementAccumulator]_[ElementOutput]
void initialize_reduce_add_linear_combination_f16_f16_f16(Manifest &manifest) {
using ElementWorkspace = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementOutput = cutlass::half_t;
using ElementCompute = cutlass::half_t;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
128 / cutlass::sizeof_bits<ElementWorkspace>::value,
ElementAccumulator,
ElementCompute
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
typename EpilogueOutputOp::ElementAccumulator,
EpilogueOutputOp::kCount
>;
using Operation_reduce_add_linear_combination_f16_f16_f16 = cutlass::reduction::device::ReduceSplitK<
cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>
>;
manifest.append(new ReductionOperation<
Operation_reduce_add_linear_combination_f16_f16_f16>(
"reduce_add_linear_combination_f16_f16_f16"
));
}
void initialize_reduce_add_linear_combination_f32_f32_f16(Manifest &manifest) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = cutlass::half_t;
using ElementCompute = float;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
128 / cutlass::sizeof_bits<ElementWorkspace>::value,
ElementAccumulator,
ElementCompute
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
typename EpilogueOutputOp::ElementAccumulator,
EpilogueOutputOp::kCount
>;
using Operation_reduce_add_linear_combination_f32_f32_f16 = cutlass::reduction::device::ReduceSplitK<
cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>
>;
manifest.append(new ReductionOperation<
Operation_reduce_add_linear_combination_f32_f32_f16>(
"reduce_add_linear_combination_f32_f32_f16"
));
}
void initialize_reduce_add_linear_combination_f32_f32_bf16(Manifest &manifest) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = cutlass::bfloat16_t;
using ElementCompute = float;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
128 / cutlass::sizeof_bits<ElementWorkspace>::value,
ElementAccumulator,
ElementCompute
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
typename EpilogueOutputOp::ElementAccumulator,
EpilogueOutputOp::kCount
>;
using Operation_reduce_add_linear_combination_f32_f32_bf16 = cutlass::reduction::device::ReduceSplitK<
cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>
>;
manifest.append(new ReductionOperation<
Operation_reduce_add_linear_combination_f32_f32_bf16>(
"reduce_add_linear_combination_f32_f32_bf16"
));
}
void initialize_reduce_add_linear_combination_f32_f32_f32(Manifest &manifest) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = float;
using ElementCompute = float;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
128 / cutlass::sizeof_bits<ElementWorkspace>::value,
ElementAccumulator,
ElementCompute
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
typename EpilogueOutputOp::ElementAccumulator,
EpilogueOutputOp::kCount
>;
using Operation_reduce_add_linear_combination_f32_f32_f32 = cutlass::reduction::device::ReduceSplitK<
cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>
>;
manifest.append(new ReductionOperation<
Operation_reduce_add_linear_combination_f32_f32_f32>(
"reduce_add_linear_combination_f32_f32_f32"
));
}
void initialize_reduce_add_linear_combination_f64_f64_f64(Manifest &manifest) {
using ElementWorkspace = double;
using ElementAccumulator = double;
using ElementOutput = double;
using ElementCompute = double;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
128 / cutlass::sizeof_bits<ElementWorkspace>::value,
ElementAccumulator,
ElementCompute
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
typename EpilogueOutputOp::ElementAccumulator,
EpilogueOutputOp::kCount
>;
using Operation_reduce_add_linear_combination_f64_f64_f64 = cutlass::reduction::device::ReduceSplitK<
cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>
>;
manifest.append(new ReductionOperation<
Operation_reduce_add_linear_combination_f64_f64_f64>(
"reduce_add_linear_combination_f64_f64_f64"
));
}
void initialize_reduce_add_linear_combination_cf32_cf32_cf32(Manifest &manifest) {
using ElementWorkspace = cutlass::complex<float>;
using ElementAccumulator = cutlass::complex<float>;
using ElementOutput = cutlass::complex<float>;
using ElementCompute = cutlass::complex<float>;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
128 / cutlass::sizeof_bits<ElementWorkspace>::value,
ElementAccumulator,
ElementCompute
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
typename EpilogueOutputOp::ElementAccumulator,
EpilogueOutputOp::kCount
>;
using Operation_reduce_add_linear_combination_cf32_cf32_cf32 = cutlass::reduction::device::ReduceSplitK<
cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>
>;
manifest.append(new ReductionOperation<
Operation_reduce_add_linear_combination_cf32_cf32_cf32>(
"reduce_add_linear_combination_cf32_cf32_cf32"
));
}
} // namespace library
} // namespace cutlass
| tools/library/src/reduction/reduction_device.cu/0 | {
"file_path": "tools/library/src/reduction/reduction_device.cu",
"repo_id": "tools",
"token_count": 2880
} | 49 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#pragma once
#include <stdexcept>
#include <list>
#include <vector>
#include "cutlass/library/library.h"
#include "cutlass/util/distribution.h"
#include "enumerated_types.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Device memory allocation
class DeviceAllocation {
private:
/// Data type of contained elements
library::NumericTypeID type_;
/// Stride (in units of elements) between adjacent tensors in a batched allocation
size_t batch_stride_;
/// Capacity in elements of device allocation
size_t capacity_;
/// Pointer to device memory
void *pointer_;
/// Layout type ID
library::LayoutTypeID layout_;
/// Stride vector
std::vector<int64_t> stride_;
/// Extent vector
std::vector<int> extent_;
/// Support allocating a 'batch' of non-overlapping tensors in contiguous memory
int batch_count_;
/// Buffer holding TensorRef instance to recently allocated memory
std::vector<uint8_t> tensor_ref_buffer_;
public:
//
// Static member functions
//
/// Determines the number of bytes needed to store 'capacity' elements of the given numeric type
static size_t bytes(library::NumericTypeID type, size_t capacity);
/// Returns the stride of a packed layout
static std::vector<int64_t> get_packed_layout(
library::LayoutTypeID layout_id,
std::vector<int> const &extent);
/// Constructs a layout and returns the capacity (in elements) needed
static size_t construct_layout(
void *bytes,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> &stride);
/// Returns true if two blocks have exactly the same value
static bool block_compare_equal(
library::NumericTypeID numeric_type,
void const *ptr_A,
void const *ptr_B,
size_t capacity);
/// Returns true if two blocks have approximately the same value
static bool block_compare_relatively_equal(
library::NumericTypeID numeric_type,
void const *ptr_A,
void const *ptr_B,
size_t capacity,
double epsilon,
double nonzero_floor);
public:
//
// Methods
//
DeviceAllocation();
DeviceAllocation(library::NumericTypeID type, size_t capacity);
DeviceAllocation(
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride = std::vector<int64_t>(),
int batch_count = 1);
~DeviceAllocation();
DeviceAllocation &reset();
/// Allocates device memory of a given type and capacity
DeviceAllocation &reset(library::NumericTypeID type, size_t capacity);
/// Allocates memory for a given layout and tensor
DeviceAllocation &reset(
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride = std::vector<int64_t>(),
int batch_count = 1);
/// Returns a buffer owning the tensor reference
std::vector<uint8_t> &tensor_ref() {
return tensor_ref_buffer_;
}
bool good() const;
/// Data type of contained elements
library::NumericTypeID type() const;
/// Pointer to start of device memory allocation
void *data() const;
/// Pointer to the first element of a batch
void *batch_data(int batch_idx) const;
/// Gets the layout type
library::LayoutTypeID layout() const;
/// Gets the stride vector
std::vector<int64_t> const & stride() const;
/// Gets the extent vector
std::vector<int> const & extent() const;
/// Gets the number of adjacent tensors in memory
int batch_count() const;
/// Gets the stride (in units of elements) between items
int64_t batch_stride() const;
/// Gets the stride (in units of bytes) between items
int64_t batch_stride_bytes() const;
/// Capacity of allocation in number of elements
size_t capacity() const;
/// Capacity of allocation in bytes
size_t bytes() const;
/// Initializes a device allocation to a random distribution using cuRAND
void initialize_random_device(int seed, Distribution dist);
/// Initializes a host allocation to a random distribution computed on the host
void initialize_random_host(int seed, Distribution dist);
/// Initializes a device allocation to a sequential distribution
void initialize_sequential_device(Distribution dist);
/// Initializes a host allocation to a sequential distribution
void initialize_sequential_host(Distribution dist);
/// Initializes a device allocation to a random distribution using cuRAND
void initialize_random_sparsemeta_device(int seed, int MetaSizeInBits);
/// Initializes a host allocation to a random sparse-metadata distribution computed on the host
void initialize_random_sparsemeta_host(int seed, int MetaSizeInBits);
/// Uniformly fills a device allocation with the given value when provided; otherwise fills with zero
void fill_device(double value);
/// Uniformly fills a host allocation with the given value when provided; otherwise fills with zero
void fill_host(double value);
/// Copies from an equivalent-sized tensor in device memory
void copy_from_device(void const *ptr);
/// Copies from an equivalent-sized tensor in host memory
void copy_from_host(void const *ptr);
/// Copies to an equivalent-sized tensor in host memory
void copy_to_host(void *ptr);
/// Writes a tensor to csv
void write_tensor_csv(std::ostream &out);
};
using DeviceAllocationList = std::list<DeviceAllocation>;
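// Illustrative sketch (not part of the profiler): allocating and initializing a
// device tensor with DeviceAllocation. The extents, distribution bounds, and seed
// below are assumptions for illustration only.
#if 0
inline void example_device_allocation() {
  // 128x256 half-precision tensor in row-major layout, single batch.
  DeviceAllocation tensor(
    library::NumericTypeID::kF16,
    library::LayoutTypeID::kRowMajor,
    {128, 256});

  if (tensor.good()) {
    Distribution dist;
    dist.set_uniform(-4, 4);

    // Fill device memory with uniformly distributed random values via cuRAND.
    tensor.initialize_random_device(/* seed = */ 2024, dist);
  }
}
#endif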
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/include/cutlass/profiler/device_allocation.h/0 | {
"file_path": "tools/profiler/include/cutlass/profiler/device_allocation.h",
"repo_id": "tools",
"token_count": 2109
} | 50 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Convolution 2D profiling
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "cutlass/profiler/conv2d_operation_profiler.h"
#include "cutlass/profiler/gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
using namespace cutlass::library;
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
Conv2dOperationProfiler::Conv2dOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kConv2d,
{
{ArgumentTypeID::kEnumerated, {"conv_kind"}, "Convolutional operator (fprop, dgrad, wgrad)"},
{ArgumentTypeID::kInteger, {"n", "input_n"}, "Input N dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"h", "input_h"}, "Input H dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"w", "input_w"}, "Input W dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"c", "input_c"}, "Input C dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"k", "filter_k"}, "Filter K dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"r", "filter_r"}, "Filter R dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"s", "filter_s"}, "Filter S dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"p", "output_p"}, "Output P dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"q", "output_q"}, "Output Q dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"g", "groups"}, "Number of convolution groups"},
{ArgumentTypeID::kInteger, {"pad_h"}, "Padding in H direction"},
{ArgumentTypeID::kInteger, {"pad_w"}, "Padding in W direction"},
{ArgumentTypeID::kInteger, {"stride_h"}, "Stride in H direction"},
{ArgumentTypeID::kInteger, {"stride_w"}, "Stride in W direction"},
{ArgumentTypeID::kInteger, {"dilation_h"}, "Dilation in H direction"},
{ArgumentTypeID::kInteger, {"dilation_w"}, "Dilation in W direction"},
{ArgumentTypeID::kTensor, {"Activation"}, "Tensor storing the Activation operand"},
{ArgumentTypeID::kTensor, {"Filter"}, "Tensor storing the Filter operand"},
{ArgumentTypeID::kTensor, {"Output"}, "Tensor storing the Output operand"},
{ArgumentTypeID::kEnumerated, {"conv_mode"}, "Convolution filter mode (conv, cross)"},
{ArgumentTypeID::kEnumerated, {"iterator_algorithm", "iterator_algo"}, "Convolution iterator algorithm (analytic, optimized)"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "SplitK mode for serial or parallel reduction (serial, parallel)"},
{ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kEnumerated, {"eq_gemm_provider", "eq-gemm-provider"}, "Enable profiling equivalent gemm by the following providers (cutlass)"},
},
{ library::Provider::kReferenceDevice, library::Provider::kReferenceHost, library::Provider::kCUDNN }
) {
description_ = " Conv2d operation. Output(Tensor4D) = alpha * Activation(Tensor4D) * Filter(Tensor4D) + beta * Output(Tensor4D)";
}
/// Destructor
Conv2dOperationProfiler::~Conv2dOperationProfiler() {
}
/// Prints usage statement for the math function
void Conv2dOperationProfiler::print_usage(std::ostream &out) const {
out << "Conv2d" << "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void Conv2dOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular convolution (specify all the convolution parameters):\n"
<< " $ cutlass_profiler --operation=Conv2d"
" --Activation=f16:nhwc --Filter=f16:nhwc --Output=f16 --accumulator-type=f32"
" --n=32 --h=14 --w=14 --c=8 --k=64 --r=3 --s=3"
" --pad_h=1 --pad_w=1"
" --stride_h=1 --stride_w=1"
" --dilation_h=1 --dilation_w=1\n\n";
}
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Total number of bytes read and written
int64_t Conv2dOperationProfiler::Conv2dProblem::bytes(
library::ConvDescription const &operation_desc) const {
cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind);
// Input bytes read and Output bytes written for the gemm problem
int64_t bytes_ =
int64_t(library::sizeof_bits(operation_desc.A.element) * mnk.m() / 8) * mnk.k() +
int64_t(library::sizeof_bits(operation_desc.B.element) * mnk.n() / 8) * mnk.k() +
int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n();
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
// Output bytes read for the gemm problem for non-zero beta values
if (!is_beta_zero) {
bytes_ += int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n();
}
return bytes_;
}
/// Total number of flops computed
int64_t Conv2dOperationProfiler::Conv2dProblem::flops(
library::ConvDescription const &operation_desc) const {
cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind);
int64_t flops_mainloop_ = int64_t(mnk.m()) * mnk.n() * mnk.k() * 2;
int64_t flops_epilogue_ = int64_t(mnk.m()) * int64_t(mnk.n()) * 2;
// Adjust mainloop flop for dgrad strided
if (operation_desc.conv_kind == library::ConvKind::kDgrad) {
flops_mainloop_ = flops_mainloop_ / (stride_h * stride_w);
}
int64_t flops_total_ = flops_mainloop_ + flops_epilogue_;
  // complex-valued support
  switch (operation_desc.tile_description.math_instruction.math_operation) {
  case library::MathOperationID::kMultiplyAddComplex:
    flops_total_ *= 4;
break;
default: break;
}
return flops_total_;
}
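// Worked example (illustrative, not part of the profiler): for fprop with groups=1 the
// GEMM-equivalent extents are GEMM_M = N*P*Q, GEMM_N = K, GEMM_K = R*S*C. Using the
// sample problem from print_examples() (n=32, p=q=14, k=64, r=s=3, c=8), this gives
// GEMM_M=6272, GEMM_N=64, GEMM_K=72, so flops ~ 2*6272*64*72 + 2*6272*64 ~ 58.6 MFLOP.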
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status Conv2dOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::ConvDescription const &operation_desc =
static_cast<library::ConvDescription const &>(operation->description());
if (!arg_as_int(problem_.n, "n", problem_space, problem)) {
// default value
problem_.n = 1;
}
if (!arg_as_int(problem_.h, "h", problem_space, problem)) {
// default value
problem_.h = 16;
}
if (!arg_as_int(problem_.w, "w", problem_space, problem)) {
// default value
problem_.w = 16;
}
if (!arg_as_int(problem_.c, "c", problem_space, problem)) {
// default value
problem_.c = 64;
}
if (!arg_as_int(problem_.k, "k", problem_space, problem)) {
// default value
problem_.k = 64;
}
if (!arg_as_int(problem_.r, "r", problem_space, problem)) {
// default value
problem_.r = 3;
}
if (!arg_as_int(problem_.s, "s", problem_space, problem)) {
// default value
problem_.s = 3;
}
if (!arg_as_int(problem_.groups, "g", problem_space, problem)) {
// default value
problem_.groups = 1;
}
if (!arg_as_int(problem_.pad_h, "pad_h", problem_space, problem)) {
// default value
problem_.pad_h = 1;
}
if (!arg_as_int(problem_.pad_w, "pad_w", problem_space, problem)) {
// default value
problem_.pad_w = 1;
}
if (!arg_as_int(problem_.stride_h, "stride_h", problem_space, problem)) {
// default value
problem_.stride_h = 1;
}
if (!arg_as_int(problem_.stride_w, "stride_w", problem_space, problem)) {
// default value
problem_.stride_w = 1;
}
if (!arg_as_int(problem_.dilation_h, "dilation_h", problem_space, problem)) {
// default value
problem_.dilation_h = 1;
}
if (!arg_as_int(problem_.dilation_w, "dilation_w", problem_space, problem)) {
// default value
problem_.dilation_w = 1;
}
//////////////////////// Convolution output dimensions p and q ////////////////////////
  // CUTLASS convolutions support arbitrary output sizes and are not constrained  //
  // by the input, filter, padding, stride, or dilation sizes.                    //
  // cuDNN sets the output dimensions (p, q) using the following equation:        //
// //
// output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) //
  //     where div_up(a, b) = (a - 1)/b + 1                                       //
// //
// Thus, when output p and q dimensions are unspecified by the user //
// cutlass profiler sets p and q which are cuDNN compliant. //
// //
////////////////////////////////////////////////////////////////////////////////////////
// set convolution output p
if (!arg_as_int(problem_.p, "p", problem_space, problem)) {
// default value (set using cudnn formula for output height, when p is not provided)
problem_.p = (
problem_.h +
2 * problem_.pad_h -
((problem_.r - 1) * problem_.dilation_h + 1)
) / (problem_.stride_h)
+ 1;
}
// set convolution output q
if (!arg_as_int(problem_.q, "q", problem_space, problem)) {
// default value (set using cudnn formula for output width, when q is not provided)
problem_.q = (
problem_.w +
2 * problem_.pad_w -
((problem_.s - 1) * problem_.dilation_w + 1)
) / (problem_.stride_w)
+ 1;
}
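  // Worked example (illustrative): with h=14, pad_h=1, r=3, dilation_h=1, stride_h=1
  // (the sample problem from print_examples()), the formula above yields
  // p = (14 + 2*1 - ((3 - 1)*1 + 1)) / 1 + 1 = 14, matching cuDNN's output size.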
/////////////////////////////////////////////////////////////////////////////////////////
if (!arg_as_SplitKModeID(problem_.split_k_mode, "split_k_mode", problem_space, problem)) {
// default value
problem_.split_k_mode = library::SplitKMode::kSerial;
}
if (!arg_as_int(problem_.split_k_slices, "split_k_slices", problem_space, problem)) {
// default value
problem_.split_k_slices = 1;
}
if (!arg_as_ConvModeID(problem_.conv_mode, "conv_mode", problem_space, problem)) {
// default value
problem_.conv_mode = library::ConvModeID::kCrossCorrelation;
}
if (!arg_as_ProviderID(problem_.eq_gemm_provider, "eq_gemm_provider", problem_space, problem)) {
// default value
problem_.eq_gemm_provider = library::Provider::kNone;
}
if (!conv_kind_satisfies(operation_desc.conv_kind, "conv_kind", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!iterator_algorithm_satisfies(operation_desc.iterator_algorithm, "iterator_algorithm", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.activation(), "Activation", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.filter(), "Filter", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.output(), "Output", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(
problem_.alpha,
operation_desc.element_epilogue,
"alpha",
problem_space,
problem)) {
if (!cast_from_double(problem_.alpha, operation_desc.element_epilogue, 1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(
problem_.beta,
operation_desc.element_epilogue,
"beta",
problem_space,
problem)) {
if (!cast_from_double(problem_.beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
// initialize library::Conv2dConfiguration
conv_workspace_.configuration.problem_size = conv::Conv2dProblemSize(
int(problem_.n),
int(problem_.h),
int(problem_.w),
int(problem_.c),
int(problem_.k),
int(problem_.r),
int(problem_.s),
int(problem_.p),
int(problem_.q),
int(problem_.pad_h),
int(problem_.pad_w),
int(problem_.stride_h),
int(problem_.stride_w),
int(problem_.dilation_h),
int(problem_.dilation_w),
static_cast<conv::Mode>(static_cast<int>(problem_.conv_mode)),
int(problem_.split_k_slices),
int(problem_.groups)
);
conv_workspace_.configuration.split_k_mode = static_cast<conv::SplitKMode>(static_cast<int>(problem_.split_k_mode));
conv_workspace_.set_stride_vector(
problem_, operation_desc.conv_kind, operation_desc.A.layout,
operation_desc.B.layout, operation_desc.C.layout);
// initialize library::ConvArguments
conv_workspace_.arguments.A = nullptr;
conv_workspace_.arguments.B = nullptr;
conv_workspace_.arguments.C = nullptr;
conv_workspace_.arguments.D = nullptr;
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
// initialize reduction operation for parallel splitKMode
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
if(!initialize_reduction_configuration_(options, report, device_context, operation, problem_space, problem)) {
return Status::kErrorInternal;
}
}
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&conv_workspace_.configuration, &conv_workspace_.arguments);
}
/// Initializes the performance result
void Conv2dOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::ConvDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
result.arguments.resize(problem_space.rank());
set_argument(result, "Activation", problem_space,
std::string(library::to_string(operation_desc.activation().element))
+ ":" + library::to_string(operation_desc.activation().layout));
set_argument(result, "Filter", problem_space,
std::string(library::to_string(operation_desc.filter().element))
+ ":" + library::to_string(operation_desc.filter().layout));
set_argument(result, "Output", problem_space,
std::string(library::to_string(operation_desc.output().element))
+ ":" + library::to_string(operation_desc.output().layout));
set_argument(result, "conv_kind", problem_space, library::to_string(operation_desc.conv_kind));
set_argument(result, "iterator_algorithm", problem_space, std::string(library::to_string(operation_desc.iterator_algorithm)));
set_argument(result, "n", problem_space, problem_.n);
set_argument(result, "h", problem_space, problem_.h);
set_argument(result, "w", problem_space, problem_.w);
set_argument(result, "c", problem_space, problem_.c);
set_argument(result, "k", problem_space, problem_.k);
set_argument(result, "r", problem_space, problem_.r);
set_argument(result, "s", problem_space, problem_.s);
set_argument(result, "p", problem_space, problem_.p);
set_argument(result, "q", problem_space, problem_.q);
set_argument(result, "g", problem_space, problem_.groups);
set_argument(result, "pad_h", problem_space, problem_.pad_h);
set_argument(result, "pad_w", problem_space, problem_.pad_w);
set_argument(result, "stride_h", problem_space, problem_.stride_h);
set_argument(result, "stride_w", problem_space, problem_.stride_w);
set_argument(result, "dilation_h", problem_space, problem_.dilation_h);
set_argument(result, "dilation_w", problem_space, problem_.dilation_w);
set_argument(result, "split_k_mode", problem_space,
std::string(library::to_string(problem_.split_k_mode)));
set_argument(result, "split_k_slices", problem_space, problem_.split_k_slices);
set_argument(result, "conv_mode", problem_space,
std::string(library::to_string(problem_.conv_mode)));
set_argument(result, "alpha", problem_space,
library::lexical_cast(problem_.alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(problem_.beta, operation_desc.element_epilogue));
set_argument(result, "eq_gemm_provider", problem_space,
std::string(library::to_string(problem_.eq_gemm_provider)));
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
// Bytes of activation, filter, and output tensors
int64_t activation_bytes = int64_t(library::sizeof_bits(operation_desc.activation().element) / 8) *
conv_workspace_.configuration.problem_size.activation_size();
int64_t filter_bytes = int64_t(library::sizeof_bits(operation_desc.filter().element) / 8) *
conv_workspace_.configuration.problem_size.filter_size();
int64_t output_bytes = int64_t(library::sizeof_bits(operation_desc.output().element) / 8) *
conv_workspace_.configuration.problem_size.output_size();
// Total number of bytes read and written for the problem
result.bytes = problem_.bytes(operation_desc);
// Theoretical flops required for the computation
result.flops = problem_.flops(operation_desc);
// Measured runtime
result.runtime = 0;
}
/// Initialize reduction problem dimensions and library::Operation
bool Conv2dOperationProfiler::initialize_reduction_configuration_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::ConvDescription const &conv_desc =
static_cast<library::ConvDescription const &>(operation->description());
library::ConvKind const &conv_kind = conv_desc.conv_kind;
if (!cast_from_double(problem_.alpha_one, conv_desc.element_epilogue, 1)) {
return false;
}
if (!cast_from_double(problem_.beta_zero, conv_desc.element_epilogue, 0)) {
return false;
}
/// This chooses the appropriate stride element of the row-major C tensor.
int const & tensor_c_stride_idx = (conv_kind == library::ConvKind::kWgrad ? 2 : 0);
/// initialize library::ReductionConfiguration
conv_workspace_.reduction_configuration.problem_size = problem_.eq_gemm_size(conv_kind).mn();
conv_workspace_.reduction_configuration.partitions = int(problem_.split_k_slices);
conv_workspace_.reduction_configuration.partition_stride = problem_.eq_gemm_size(conv_kind).mn().product();
conv_workspace_.reduction_configuration.ldw =
conv_workspace_.configuration.stride_c[tensor_c_stride_idx];
conv_workspace_.reduction_configuration.lds =
conv_workspace_.configuration.stride_c[tensor_c_stride_idx];
conv_workspace_.reduction_configuration.ldd =
conv_workspace_.configuration.stride_c[tensor_c_stride_idx];
// find reduction operation
library::ReductionFunctionalKey reduction_key(
library::Provider::kCUTLASS,
conv_desc.tile_description.math_instruction.element_accumulator, // element workspace
conv_desc.tile_description.math_instruction.element_accumulator, // element accumulator
conv_desc.C.element, // element output
conv_desc.element_epilogue // element compute
);
#if 0 // debug print to check which reduction instance is selected
std::cout << reduction_key << "\n";
#endif
auto reduction_it = Singleton::get().operation_table.reduction_operations.find(reduction_key);
if(reduction_it == Singleton::get().operation_table.reduction_operations.end()) {
return false;
}
// initialize reduction operation required for parallel split-k conv2d operator
reduction_op_ = reduction_it->second;
// reduction operation found and initialized
return true;
}
/// Initializes workspace
Status Conv2dOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
// initialize conv2d underlying operation to handle parallel reduction
library::Operation const* underlying_operation = operation;
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
return Status::kErrorNotSupported;
}
}
library::ConvDescription const &operation_desc =
static_cast<library::ConvDescription const &>(underlying_operation->description());
// Compute the number of copies of the problem to avoid L2 camping.
if (!options.profiling.workspace_count) {
int64_t bytes = problem_.bytes(operation_desc);
if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) {
conv_workspace_.problem_count =
1 + int((3 * int64_t(options.device.properties.l2CacheSize)) / bytes);
}
else {
conv_workspace_.problem_count = 1;
}
}
else {
conv_workspace_.problem_count = options.profiling.workspace_count;
}
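  // Worked example (illustrative): with a 40 MiB L2 and a 10 MiB problem footprint,
  // problem_count = 1 + (3 * 40 MiB) / (10 MiB) = 13 copies are cycled through so that
  // successive profiling iterations do not hit data already resident in L2.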
if (options.execution_mode != ExecutionMode::kDryRun) {
int seed_shift = 0;
conv_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
problem_.extent_a(operation_desc.conv_kind),
conv_workspace_.configuration.stride_a,
conv_workspace_.problem_count,
seed_shift++
);
conv_workspace_.B = device_context.allocate_tensor(
options,
"B",
operation_desc.B.element,
operation_desc.B.layout,
problem_.extent_b(operation_desc.conv_kind),
conv_workspace_.configuration.stride_b,
conv_workspace_.problem_count,
seed_shift++
);
if(problem_.groups == problem_.c && problem_.groups == problem_.k){
// Depthwise direct conv kernel needs reorder the filter.
conv_workspace_.reordered_B = device_context.allocate_tensor(
options,
"B",
operation_desc.B.element,
operation_desc.B.layout,
problem_.extent_b(operation_desc.conv_kind),
conv_workspace_.configuration.stride_b,
conv_workspace_.problem_count,
seed_shift++
);
}
conv_workspace_.C = device_context.allocate_tensor(
options,
"C",
operation_desc.C.element,
operation_desc.C.layout,
problem_.extent_c(operation_desc.conv_kind),
conv_workspace_.configuration.stride_c,
conv_workspace_.problem_count,
seed_shift++
);
conv_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.C.element,
operation_desc.C.layout,
problem_.extent_c(operation_desc.conv_kind),
conv_workspace_.configuration.stride_c,
conv_workspace_.problem_count
);
conv_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.C.element,
operation_desc.C.layout,
problem_.extent_c(operation_desc.conv_kind),
conv_workspace_.configuration.stride_c,
conv_workspace_.problem_count
);
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = underlying_operation->get_host_workspace_size(&conv_workspace_.configuration);
conv_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = underlying_operation->get_device_workspace_size(&conv_workspace_.configuration);
conv_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = underlying_operation->initialize(
&conv_workspace_.configuration,
conv_workspace_.host_workspace.data(),
conv_workspace_.device_workspace.data());
if (status != Status::kSuccess) {
return status;
}
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
workspace_size = reduction_op_->get_host_workspace_size(&conv_workspace_.reduction_configuration);
conv_workspace_.reduction_host_workspace.resize(workspace_size, 0);
status = reduction_op_->initialize(
&conv_workspace_.reduction_configuration,
conv_workspace_.reduction_host_workspace.data(),
nullptr);
if (status != Status::kSuccess) {
return status;
}
}
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kConv2d;
results_.back().disposition = Disposition::kNotRun;
for(auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool Conv2dOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
cudaError_t result;
// Initialize structure containing Conv2d arguments
conv_workspace_.arguments.A = conv_workspace_.A->data();
conv_workspace_.arguments.B = conv_workspace_.B->data();
conv_workspace_.arguments.C = conv_workspace_.C->data();
conv_workspace_.arguments.D = conv_workspace_.Computed->data();
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
if (conv_workspace_.reordered_B != nullptr){
conv_workspace_.arguments.reordered_B = conv_workspace_.reordered_B->data();
}else{
conv_workspace_.arguments.reordered_B = nullptr;
}
conv_workspace_.Computed->copy_from_device(conv_workspace_.C->data());
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
// update library::ConvArguments for parallel split-k reduction
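// (the conv kernel writes partial accumulations to the device workspace using
// alpha = 1 and beta = 0; the reduction kernel launched afterwards combines the
// slices and applies the user's alpha/beta with tensor C as the source)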
conv_workspace_.arguments.D = conv_workspace_.device_workspace.data();
conv_workspace_.arguments.alpha = problem_.alpha_one.data();
conv_workspace_.arguments.beta = problem_.beta_zero.data();
/// initialize library::ReductionArguments
conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data();
conv_workspace_.reduction_arguments.source = conv_workspace_.C->data();
conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->data();
conv_workspace_.reduction_arguments.alpha = problem_.alpha.data();
conv_workspace_.reduction_arguments.beta = problem_.beta.data();
conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost;
}
//
// Run the CUTLASS operation
//
// initialize conv2d underlying operation to handle parallel reduction
library::Operation const* underlying_operation = operation;
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
results_.back().disposition = Disposition::kFailed;
return false;
}
}
#if 0
std::cout << "profiling : " << std::endl
<< "conv2d : " << operation->description().name << std::endl
<< "underlying conv2d : " << underlying_operation->description().name << std::endl
<< "reduction : " << reduction_op_->description().name << std::endl;
#endif
// run cutlass conv2d operation
results_.back().status = underlying_operation->run(
&conv_workspace_.arguments,
conv_workspace_.host_workspace.data(),
conv_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// Run parallel reduction kernel for parallel split_k_mode
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
results_.back().status = reduction_op_->run(
&conv_workspace_.reduction_arguments,
conv_workspace_.reduction_host_workspace.data(),
nullptr);
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
}
// Synchronize before running device reference
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// CUTLASS op ran but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUDNN
// Run verification cudnn reference
if (options.verification.provider_enabled(library::Provider::kCUDNN)) {
// Guard against unsupported cases
auto const & conv_desc = static_cast<library::ConvDescription const &>(operation->description());
Status status = cudnn_satisfies(conv_desc, conv_workspace_.configuration);
// Initialize reference data to the source data
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
if (status == Status::kSuccess) {
// call cudnn verification if supported
verify_with_cudnn_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
else if (status == Status::kErrorInvalidProblem) {
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kInvalidProblem;
}
else {
// set verification map for cudnn to not supported
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUDNN
// Run verification device reference
if (options.verification.provider_enabled(library::Provider::kReferenceDevice)) {
// Restore reference data back to initial source data
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
verify_with_device_reference_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
// Run verification host reference
if (options.verification.provider_enabled(library::Provider::kReferenceHost)) {
// Restore reference data back to initial source data
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
verify_with_host_reference_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for(auto &m : results_.back().verification_map) {
if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if(is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// Return true means continue profiling
return true;
}
/// Verifies CUTLASS against host reference
bool Conv2dOperationProfiler::verify_with_host_reference_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
Status status;
//
// Find host reference operation using conv2d functional description key
//
library::OperationDescription const &desc = operation->description();
auto &conv_desc = static_cast<library::ConvDescription const &>(desc);
library::ConvFunctionalKey conv2d_key(
library::Provider::kReferenceHost,
conv_desc.conv_kind,
conv_desc.A.element,
conv_desc.A.layout,
conv_desc.B.element,
conv_desc.B.layout,
conv_desc.C.element,
conv_desc.C.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.element_epilogue);
#if 0 // debug print to check which host reference instance is selected
std::cout << conv2d_key << "\n";
#endif
auto operators_it = Singleton::get().operation_table.conv2d_operations.find(conv2d_key);
if(operators_it == Singleton::get().operation_table.conv2d_operations.end()) {
results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun;
return true;
}
// conv2d host reference minimum cc is 0 (CPU) and no iterator algorithm
library::ConvPreferenceKey preference_key(0, library::IteratorAlgorithmID::kNone);
auto cc_it = operators_it->second.find(preference_key);
if(cc_it == operators_it->second.end()) {
results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun;
return true;
}
// host reference has only one instance in Conv2dOperationVectorMap
library::Operation const *reference_op = cc_it->second[0];
//
// Copy input tensors A, B, and C from device to host buffers
//
conv_workspace_.host_tensor_a.resize(conv_workspace_.A->bytes());
conv_workspace_.host_tensor_b.resize(conv_workspace_.B->bytes());
conv_workspace_.host_tensor_c.resize(conv_workspace_.C->bytes());
conv_workspace_.A->copy_to_host(conv_workspace_.host_tensor_a.data());
conv_workspace_.B->copy_to_host(conv_workspace_.host_tensor_b.data());
conv_workspace_.C->copy_to_host(conv_workspace_.host_tensor_c.data());
//
// Initialize structure containing Conv2d arguments
//
conv_workspace_.arguments.A = conv_workspace_.host_tensor_a.data();
conv_workspace_.arguments.B = conv_workspace_.host_tensor_b.data();
conv_workspace_.arguments.C = conv_workspace_.host_tensor_c.data();
conv_workspace_.arguments.D = conv_workspace_.host_tensor_c.data();
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Initialize host reference operation
//
std::vector<uint8_t> host_workspace_reference_op;
uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration);
host_workspace_reference_op.resize(workspace_size, 0);
reference_op->initialize(
&conv_workspace_.configuration,
host_workspace_reference_op.data());
//
// Run host reference operation
//
status = reference_op->run(
&conv_workspace_.arguments,
host_workspace_reference_op.data());
// Handle errors
if (status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotVerified;
return true;
}
//
// Copy host reference output to device memory for equality check on device
//
conv_workspace_.Reference->copy_from_host(conv_workspace_.arguments.D);
//
// Verify results
//
results_.back().verification_map[library::Provider::kReferenceHost] = compare_tensors(
options,
*conv_workspace_.Computed,
*conv_workspace_.Reference,
conv_workspace_.Computed->batch_stride()
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kReferenceHost] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
static_cast<library::ConvDescription const &>(operation->description()),
library::Provider::kCUTLASS,
library::Provider::kReferenceHost);
}
// Return true means continue profiling
return true;
}
/// Verifies CUTLASS against device reference
bool Conv2dOperationProfiler::verify_with_device_reference_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
Status status;
//
// Find device reference operation using conv2d functional description key
//
library::OperationDescription const &desc = operation->description();
auto &conv_desc = static_cast<library::ConvDescription const &>(desc);
library::ConvFunctionalKey conv2d_key(
library::Provider::kReferenceDevice,
conv_desc.conv_kind,
conv_desc.A.element,
conv_desc.A.layout,
conv_desc.B.element,
conv_desc.B.layout,
conv_desc.C.element,
conv_desc.C.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.element_epilogue);
auto operators_it = Singleton::get().operation_table.conv2d_operations.find(conv2d_key);
if(operators_it == Singleton::get().operation_table.conv2d_operations.end()) {
results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotRun;
return true;
}
// conv2d device reference minimum cc is 50 and no iterator algorithm
library::ConvPreferenceKey preference_key(50, library::IteratorAlgorithmID::kNone);
auto cc_it = operators_it->second.find(preference_key);
if(cc_it == operators_it->second.end()) {
results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotRun;
return true;
}
// device reference has only one instance in Conv2dOperationVectorMap
library::Operation const *reference_op = cc_it->second[0];
//
// Initialize device reference operation
//
std::vector<uint8_t> host_workspace_reference_op;
uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration);
host_workspace_reference_op.resize(workspace_size, 0);
reference_op->initialize(
&conv_workspace_.configuration,
host_workspace_reference_op.data());
// Initialize structure containing Conv2d arguments
conv_workspace_.arguments.A = conv_workspace_.A->data();
conv_workspace_.arguments.B = conv_workspace_.B->data();
conv_workspace_.arguments.C = conv_workspace_.C->data();
conv_workspace_.arguments.D = conv_workspace_.Reference->data();
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Run device reference operation
//
status = reference_op->run(
&conv_workspace_.arguments,
host_workspace_reference_op.data());
// Handle errors
if (status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotVerified;
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kReferenceDevice] = compare_tensors(
options,
*conv_workspace_.Computed,
*conv_workspace_.Reference,
conv_workspace_.Computed->batch_stride()
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kReferenceDevice] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
static_cast<library::ConvDescription const &>(operation->description()),
library::Provider::kCUTLASS,
library::Provider::kReferenceDevice);
}
// Return true means continue profiling
return true;
}
/// Measures performance results
bool Conv2dOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing Conv2d arguments
conv_workspace_.arguments.A = conv_workspace_.A->data();
conv_workspace_.arguments.B = conv_workspace_.B->data();
conv_workspace_.arguments.C = conv_workspace_.C->data();
conv_workspace_.arguments.D = conv_workspace_.Computed->data();
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
// update library::ConvArguments for parallel split-k reduction
conv_workspace_.arguments.D = conv_workspace_.device_workspace.data();
conv_workspace_.arguments.alpha = problem_.alpha_one.data();
conv_workspace_.arguments.beta = problem_.beta_zero.data();
/// initialize library::ReductionArguments
conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data();
conv_workspace_.reduction_arguments.source = conv_workspace_.C->data();
conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->data();
conv_workspace_.reduction_arguments.alpha = problem_.alpha.data();
conv_workspace_.reduction_arguments.beta = problem_.beta.data();
conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost;
}
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&conv_workspace_.arguments,
conv_workspace_.host_workspace.data(),
conv_workspace_.device_workspace.data()
);
}
return true;
}
/// Method to profile a CUTLASS Operation
Status Conv2dOperationProfiler::profile_cutlass_(
double &runtime,
Options const &options,
library::Operation const *operation,
void *arguments,
void *host_workspace,
void *device_workspace) {
GpuTimer timer;
// initialize conv2d underlying operation to handle parallel reduction
library::Operation const* underlying_operation = operation;
library::ConvArguments *conv_arguments = static_cast<library::ConvArguments *>(arguments);
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
return Status::kErrorNotSupported;
}
}
//
// Optional sleep to limit power consumption and thermals
//
sleep(options.profiling.sleep_duration);
//
// Warmup loop
//
Status status;
for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) {
// Setup rotating workspace
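// Rotate across problem_count workspace copies (presumably so successive timed
// iterations do not reuse cache-resident tensors)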
int workspace_idx = options.profiling.warmup_iterations + iteration;
int problem_idx = (workspace_idx % conv_workspace_.problem_count);
conv_arguments->A = conv_workspace_.A->batch_data(problem_idx);
conv_arguments->B = conv_workspace_.B->batch_data(problem_idx);
conv_arguments->C = conv_workspace_.C->batch_data(problem_idx);
conv_arguments->D = conv_workspace_.Computed->batch_data(problem_idx);
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
// update library::ConvArguments for parallel split-k reduction
conv_arguments->D = conv_workspace_.device_workspace.data();
/// initialize library::ReductionArguments
conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data();
conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx);
conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx);
}
// Run underlying conv2d operation
status = underlying_operation->run(
arguments,
host_workspace,
device_workspace);
// Run parallel reduction kernel for parallel split_k_mode
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
status = reduction_op_->run(
&conv_workspace_.reduction_arguments,
conv_workspace_.reduction_host_workspace.data(),
nullptr);
}
if (status != Status::kSuccess) {
return status;
}
}
//
// Initialize GPU timer
//
timer.start();
//
// Profiling loop
//
int Iterations = options.profiling.iterations;
int iteration = 0;
for (; iteration < Iterations; ++iteration) {
// Setup rotating workspace
int problem_idx = (iteration % conv_workspace_.problem_count);
conv_arguments->A = conv_workspace_.A->batch_data(problem_idx);
conv_arguments->B = conv_workspace_.B->batch_data(problem_idx);
conv_arguments->C = conv_workspace_.C->batch_data(problem_idx);
conv_arguments->D = conv_workspace_.Computed->batch_data(problem_idx);
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
// update library::ConvArguments for parallel split-k reduction
conv_arguments->D = conv_workspace_.device_workspace.data();
/// initialize library::ReductionArguments
conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data();
conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx);
conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx);
}
// Run underlying conv2d operation
status = underlying_operation->run(
arguments,
host_workspace,
device_workspace);
// Run parallel reduction kernel for parallel split_k_mode
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
status = reduction_op_->run(
&conv_workspace_.reduction_arguments,
conv_workspace_.reduction_host_workspace.data(),
nullptr);
}
if (status != Status::kSuccess) {
return status;
}
}
//
// Wait for completion
//
timer.stop_and_wait();
//
// Update performance result
//
runtime = timer.duration(iteration);
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if CUTLASS_ENABLE_CUDNN
/// Verifies CUTLASS against cudnn reference
bool Conv2dOperationProfiler::verify_with_cudnn_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
auto &conv_desc = static_cast<library::ConvDescription const &>(operation->description());
//
// Construct cudnn operators
//
CudnnCreate handle;
cudnnStatus_t status = handle.get_cudnn_create_status();
if (status != CUDNN_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status);
return true;
}
//
// Initialize state
//
// Initialize structure containing Conv2d arguments
conv_workspace_.arguments.A = conv_workspace_.A->data();
conv_workspace_.arguments.B = conv_workspace_.B->data();
conv_workspace_.arguments.D = conv_workspace_.Reference->data();
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
// cuDNN does not support four tensor arguments, so we copy the tensor C data into
// tensor D.
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
conv_workspace_.arguments.C = conv_workspace_.arguments.D;
try {
//
// Construct dispatcher to cudnn operator
//
detail::cudnnConvDispatcher conv_op(
conv_desc,
conv_workspace_.configuration,
conv_workspace_.arguments,
handle
);
if (conv_op.status != Status::kSuccess) {
if (conv_op.status == Status::kErrorNotSupported) {
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported;
} else {
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed;
}
return true;
}
status = conv_op(handle);
// Handle errors
if (status != CUDNN_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status);
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUDNN] = compare_tensors(
options,
*conv_workspace_.Computed,
*conv_workspace_.Reference,
conv_workspace_.Computed->batch_stride()
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUDNN] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
conv_desc,
library::Provider::kCUTLASS,
library::Provider::kCUDNN);
}
}
catch (...) {
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed;
}
// Return true means continue profiling
return true;
}
#endif // #if CUTLASS_ENABLE_CUDNN
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/src/conv2d_operation_profiler.cu/0 | {
"file_path": "tools/profiler/src/conv2d_operation_profiler.cu",
"repo_id": "tools",
"token_count": 20115
} | 51 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "cutlass/profiler/cublas_helpers.h"
#include "cutlass/profiler/rank_2k_operation_profiler.h"
#include "cutlass/profiler/gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
Rank2KOperationProfiler::Rank2KOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kRank2K,
{
{ArgumentTypeID::kEnumerated, {"rank_k_kind"}, "Variant of RankK (universal)"},
{ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the RankK problem space"},
{ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the RankK problem space"},
{ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
{ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"},
{ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
{ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for RankK kernel (lower or upper)"},
{ArgumentTypeID::kEnumerated, {"blas_mode"}, "Blas Mode for RankK kernel (symmetric or hermitian)"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of RankK computed in one batch"},
},
{ library::Provider::kCUBLAS}
) {
description_ = " Rank 2k Update. D = alpha * (A*B^T + B*A^T) + beta * C (symmetric) or D = alpha * (A*B^H+B*A^H) + beta * C (hermitian)";
}
/// Destructor
Rank2KOperationProfiler::~Rank2KOperationProfiler() {
}
/// Prints usage statement for the math function
void Rank2KOperationProfiler::print_usage(std::ostream &out) const {
out << "RankK" << "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void Rank2KOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular problem size Syrk kernel:\n"
<< " $ cutlass_profiler --operation=rank_2k --blas_mode=symmetric --n=1024 --k=128\n\n"
<< "Profile a particular problem size Herk kernel:\n"
<< " $ cutlass_profiler --operation=rank_2k --blas_mode=hermitian --n=1024 --k=128\n\n"
<< "Schmoo over problem size and beta:\n"
<< " $ cutlass_profiler --operation=rank_2k --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
<< "Schmoo over accumulator types:\n"
<< " $ cutlass_profiler --operation=rank_2k --accumulator-type=f16,f32\n\n"
<< "Schmoo over fill modees:\n"
<< " $ cutlass_profiler --operation=rank_2k --fill_mode=lower/upper\n\n"
<< "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
<< " $ cutlass_profiler --operation=rank_2k --A=f16:column or --A=*:row\n\n"
<< "Using various input value distribution:\n"
<< " $ cutlass_profiler --operation=rank_2k --dist=uniform,min:0,max:3\n"
<< " $ cutlass_profiler --operation=rank_2k --dist=gaussian,mean:0,stddev:3\n"
<< " $ cutlass_profiler --operation=rank_2k --dist=sequential,start:0,delta:1\n\n"
<< "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
<< " $ cutlass_profiler --operation=rank_2k --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
<< "Test your changes to rank_2k kernels with a quick functional test and save results in functional-test.csv:\n"
<< " $ cutlass_profiler --operation=rank_2k \\ \n"
<< " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
<< " --beta=0,1,2 --profiling-iterations=1 \\ \n"
<< " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
Status Rank2KOperationProfiler::RankKProblem::parse(
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!arg_as_int(this->n, "n", problem_space, problem)) {
// default value
this->n = 1024;
}
if (!arg_as_int(this->k, "k", problem_space, problem)) {
// default value
this->k = 1024;
}
if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
// default value
this->split_k_slices = 1;
}
if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
// default value
this->batch_count = 1;
}
if (this->split_k_slices > 1 && this->batch_count > 1) {
// At least one of these must be one
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(
this->alpha,
operation_desc.element_epilogue,
"alpha",
problem_space,
problem)) {
if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(
this->beta,
operation_desc.element_epilogue,
"beta",
problem_space,
problem)) {
if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->n), int(this->k)}).front();
this->ldb = DeviceAllocation::get_packed_layout(
operation_desc.B.layout, {int(this->n), int(this->k)}).front();
this->ldc = DeviceAllocation::get_packed_layout(
operation_desc.C.layout, {int(this->n), int(this->n)}).front();
return Status::kSuccess;
}
/// Total number of bytes loaded
int64_t Rank2KOperationProfiler::RankKProblem::bytes(library::RankKDescription const &operation_desc) const {
// Input bytes read and Output bytes written for the gemm problem
int64_t bytes =
2 * int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k +
2 * int64_t(library::sizeof_bits(operation_desc.B.element) * n / 8) * k +
// Half matrix including the diagonal will have (N*(N+1))/2 elements
int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2;
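// e.g., for n = 4 the half matrix holds 4 * 5 / 2 = 10 elements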
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
// Output bytes read for the gemm problem for non-zero beta values
if (!is_beta_zero) {
bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2;
}
bytes *= batch_count;
return bytes;
}
/// Total number of flops computed
int64_t Rank2KOperationProfiler::RankKProblem::flops(library::RankKDescription const &operation_desc) const {
// FLOPs = 2 * n(n+1)k/2 [mma1] + 2 * n(n+1)k/2 [mma2] + 2 * n(n+1)/2 [epilogue]
// FLOPs = n(n+1)(2k + 1)
int64_t flops_ = n * (n + 1) * (2*k + 1);
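// e.g., n = 1024, k = 128 gives 1024 * 1025 * 257 ~= 2.7e8 FLOPs before the
// complex-valued scaling below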
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddComplexFastF32:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddGaussianComplex:
flops_ *= 3;
break;
default: break;
}
return flops_;
}
/// Initializes a performance result
void Rank2KOperationProfiler::RankKProblem::initialize_result(
PerformanceResult &result,
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.arguments.resize(problem_space.rank());
set_argument(result, "rank_k_kind", problem_space, library::to_string(operation_desc.rank_k_kind));
set_argument(result, "A", problem_space,
std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
set_argument(result, "B", problem_space,
std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout));
set_argument(result, "C", problem_space,
std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode));
set_argument(result, "blas_mode", problem_space, library::to_string(operation_desc.blas_mode));
set_argument(result, "n", problem_space, n);
set_argument(result, "k", problem_space, k);
set_argument(result, "split_k_slices", problem_space, split_k_slices);
set_argument(result, "batch_count", problem_space, batch_count);
set_argument(result, "alpha", problem_space,
library::lexical_cast(alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(beta, operation_desc.element_epilogue));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status Rank2KOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::RankKDescription const &operation_desc =
static_cast<library::RankKDescription const &>(operation->description());
if (operation_desc.rank_k_kind != library::RankKKind::kUniversal) {
return Status::kErrorInvalidProblem;
}
Status status = problem_.parse(operation_desc, problem_space, problem);
if (status != Status::kSuccess) {
return status;
}
rank_k_workspace_.configuration.problem_size.m() = int(problem_.n);
rank_k_workspace_.configuration.problem_size.n() = int(problem_.n);
rank_k_workspace_.configuration.problem_size.k() = int(problem_.k);
rank_k_workspace_.configuration.lda = problem_.lda;
rank_k_workspace_.configuration.ldb = problem_.ldb;
rank_k_workspace_.configuration.ldc = problem_.ldc;
rank_k_workspace_.configuration.ldd = problem_.ldc;
//rank_k_workspace_.configuration.split_k_slices = int(problem_.split_k_slices);
rank_k_workspace_.configuration.batch_count = int(problem_.split_k_slices);
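// Note: as suggested by the commented-out line above, the universal RankK
// configuration appears to carry the split-k slice count through its
// batch_count field.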
rank_k_workspace_.arguments.A = nullptr;
rank_k_workspace_.arguments.B = nullptr;
rank_k_workspace_.arguments.C = nullptr;
rank_k_workspace_.arguments.D = nullptr;
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&rank_k_workspace_.configuration, &rank_k_workspace_.arguments);
}
/// Initializes the performance result
void Rank2KOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
problem_.initialize_result(result, operation_desc, problem_space);
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
result.bytes = problem_.bytes(operation_desc);
result.flops = problem_.flops(operation_desc);
result.runtime = 0;
}
/// Initializes workspace
Status Rank2KOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::RankKDescription const &operation_desc =
static_cast<library::RankKDescription const &>(operation->description());
if (options.execution_mode != ExecutionMode::kDryRun) {
int seed_shift = 0;
rank_k_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.n), int(problem_.k)},
{int(problem_.lda)},
1, // batch_count
seed_shift++
);
rank_k_workspace_.B = device_context.allocate_tensor(
options,
"B",
operation_desc.B.element,
operation_desc.B.layout,
{int(problem_.n), int(problem_.k)},
{int(problem_.ldb)},
1, // batch_count
seed_shift++
);
rank_k_workspace_.C = device_context.allocate_tensor(
options,
"C",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)},
1, // batch_count
seed_shift++
);
rank_k_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)}
);
rank_k_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)}
);
rank_k_workspace_.Computed->copy_from_device(rank_k_workspace_.C->data());
rank_k_workspace_.Reference->copy_from_device(rank_k_workspace_.C->data());
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = operation->get_host_workspace_size(&rank_k_workspace_.configuration);
rank_k_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = operation->get_device_workspace_size(&rank_k_workspace_.configuration);
rank_k_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = operation->initialize(
&rank_k_workspace_.configuration,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data());
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kRank2K;
results_.back().disposition = Disposition::kNotRun;
for(auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool Rank2KOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.B = rank_k_workspace_.B->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.C->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Run the CUTLASS operation
//
results_.back().status = operation->run(
&rank_k_workspace_.arguments,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// CUTLASS op ran but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUBLAS
if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
// Guard against unsupported cases
auto const & rank_k_desc = static_cast<library::RankKDescription const &>(operation->description());
if (cublas_satisfies(rank_k_desc) == Status::kSuccess) {
// call cublas verification if supported
verify_with_cublas_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
else {
// set verification map for cublas to not supported
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUBLAS
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for(auto &m : results_.back().verification_map) {
if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if(is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// Return true means continue profiling
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against cuBLAS reference
bool Rank2KOperationProfiler::verify_with_cublas_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
#if CUTLASS_ENABLE_CUBLAS
library::RankKDescription const &rank_k_desc =
static_cast<library::RankKDescription const &>(operation->description());
//
// Construct cuBLAS operators
//
CublasCreate handle;
cublasStatus_t status = handle.get_cublas_create_status();
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Initialize state
//
try {
//
// Construct dispatcher to cublas<t>Syr2k()
//
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.B = rank_k_workspace_.B->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.Reference->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Reference->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
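// Both C and D point at the Reference tensor so the cuBLAS result lands in
// Reference, which is later compared against the CUTLASS-computed tensor.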
detail::cublasRankKDispatcher rank_k_op(
rank_k_desc,
rank_k_workspace_.configuration,
rank_k_workspace_.arguments
);
if (rank_k_op.status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
return true;
}
results_.back().status = Status::kSuccess;
status = rank_k_op(handle);
// Handle errors
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
options,
*rank_k_workspace_.Computed,
*rank_k_workspace_.Reference
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
rank_k_desc,
library::Provider::kCUTLASS,
library::Provider::kCUBLAS);
}
}
catch (...) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
}
#endif
// Return true means continue profiling
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
bool Rank2KOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.B = rank_k_workspace_.B->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.C->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&rank_k_workspace_.arguments,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data()
);
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/src/rank_2k_operation_profiler.cu/0 | {
"file_path": "tools/profiler/src/rank_2k_operation_profiler.cu",
"repo_id": "tools",
"token_count": 8854
} | 52 |
/******************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
/**
* \file
* \brief cuda kernels to do avg/max pooling on a device memory tensor with NHWC layout.
*/
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/tensor_ref.h"
#include "device_utils.h"
#include <float.h>
namespace cutlass {
/** \brief interface to do avg/max pooling on a device memory tensor with NHWC layout.
* \tparam T: data type
*/
template <typename T>
void pooling_nhwc(cutlass::Tensor4DCoord input_tensor_size,
cutlass::Tensor4DCoord filter_tensor_size,
cutlass::Tensor4DCoord output_tensor_size,
cutlass::Tensor4DCoord padding,
cutlass::MatrixCoord stride,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNHWC> ref_output,
int poolingType, //0 for avg pooling ; 1 for max pooling
cudaStream_t stream);
/** get the output size of pooling
*/
inline int getOutputSize(int H_W, int padding, int kernel_size, int stride)
{
return (H_W + 2 * padding - kernel_size) / stride + 1;
}
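// e.g., H_W = 224, padding = 1, kernel_size = 3, stride = 2 -> (224 + 2 - 3) / 2 + 1 = 112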
/**
 * input is [N, H, W, C]
 * assume stride == kernel_size
 * output_h = (H + 2*padding_H - kernel_H)/stride_H + 1
 * output_w = (W + 2*padding_W - kernel_W)/stride_W + 1
 * output is [N, output_h, output_w, C]
 * grid(N, output_h, output_w)
 * block(min(C, 256)) :
 * each block computes the C channels of one output pixel; each thread handles (C + 255)/256 of them
 */
template<typename T, bool IS_AVG_POOLING>
__global__ void pooling_nhwc_element1_kernel(T* output,
const T* input,
const int N,
const int H,
const int W,
const int C,
const int output_H,
const int output_W,
const int kernel_H,
const int kernel_W,
const int stride_H,
const int stride_W,
const int padding_H,
const int padding_W)
{
const int tid = threadIdx.x;
const int n_idx = blockIdx.x;
const int output_h_idx = blockIdx.y;
const int output_w_idx = blockIdx.z;
int h_start_idx = output_h_idx * stride_H - padding_H;
int h_end_idx = h_start_idx + kernel_H;
h_start_idx = (h_start_idx < 0) ? 0 : h_start_idx;
h_end_idx = h_end_idx > H ? H : h_end_idx;
int w_start_idx = output_w_idx * stride_W - padding_W;
int w_end_idx = w_start_idx + kernel_W;
w_start_idx = (w_start_idx < 0) ? 0 : w_start_idx;
w_end_idx = w_end_idx > W ? W : w_end_idx;
input += n_idx * H * W * C;
output += ((n_idx * output_H + output_h_idx) * output_W + output_w_idx) * C;
const int kernel_size2 = kernel_H * kernel_W;
for (int c_idx = tid; c_idx < C; c_idx += blockDim.x) {
float pooling;
if (IS_AVG_POOLING){
pooling = 0.0f;
}
else{
pooling = -FLT_MAX;
}
for (int h = h_start_idx; h < h_end_idx; h++) {
for (int w = w_start_idx; w < w_end_idx; w++) {
const int idx = (h * W + w) * C;
const float tmp = static_cast<float>(input[idx + c_idx]);
if (IS_AVG_POOLING){
pooling = pooling + tmp;
}
else{
pooling = pooling > tmp ? pooling : tmp;
}
}
}
T output_val;
if (IS_AVG_POOLING){
output_val = T(pooling/kernel_size2);
}
else{
output_val = T(pooling);
}
output[c_idx] = output_val;
}
}
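// Illustrative launch sketch for the element1 kernel; the pointer names and sizes
// below are assumptions for the example, not taken from the host launcher:
// N=8, H=W=32, C=129, 2x2 max pooling, stride 2, no padding, so
// output_H = output_W = 16 and block.x = C.
#if 0
dim3 grid(8, 16, 16);   // (N, output_H, output_W)
dim3 block(129);        // one thread per channel since C < 256
pooling_nhwc_element1_kernel<float, /*IS_AVG_POOLING=*/false><<<grid, block, 0, stream>>>(
    d_output, d_input, 8, 32, 32, 129, 16, 16, 2, 2, 2, 2, 0, 0);
#endif
/**
 * Vectorized variant: processes two channels at a time as T2 (float2/half2);
 * the host launcher passes C/2 as the C argument, otherwise the mapping matches
 * the element1 kernel above.
 */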
template<typename T2, typename T, bool IS_AVG_POOLING>
__global__ void pooling_nhwc_element2_kernel(T2* output,
const T2* input,
const int N,
const int H,
const int W,
const int C,
const int output_H,
const int output_W,
const int kernel_H,
const int kernel_W,
const int stride_H,
const int stride_W,
const int padding_H,
const int padding_W)
{
const int tid = threadIdx.x;
const int n_idx = blockIdx.x;
const int output_h_idx = blockIdx.y;
const int output_w_idx = blockIdx.z;
int h_start_idx = output_h_idx * stride_H - padding_H;
int h_end_idx = h_start_idx + kernel_H;
h_start_idx = (h_start_idx < 0) ? 0 : h_start_idx;
h_end_idx = h_end_idx > H ? H : h_end_idx;
int w_start_idx = output_w_idx * stride_W - padding_W;
int w_end_idx = w_start_idx + kernel_W;
w_start_idx = (w_start_idx < 0) ? 0 : w_start_idx;
w_end_idx = w_end_idx > W ? W : w_end_idx;
input += n_idx * H * W * C;
output += ((n_idx * output_H + output_h_idx) * output_W + output_w_idx) * C;
const int kernel_size2 = kernel_H * kernel_W;
for (int c_idx = tid; c_idx < C; c_idx += blockDim.x) {
float2 pooling;
if (IS_AVG_POOLING) {
pooling = {0.0f, 0.0f};
}
else {
pooling = {-FLT_MAX, -FLT_MAX};
}
for (int h = h_start_idx; h < h_end_idx; h++) {
for (int w = w_start_idx; w < w_end_idx; w++) {
const int idx = (h * W + w) * C;
const T2 tmp = input[idx + c_idx];
const float2 tmp_flt2 = {static_cast<float>(tmp.x), static_cast<float>(tmp.y)};
if (IS_AVG_POOLING) {
pooling.x += tmp_flt2.x;
pooling.y += tmp_flt2.y;
}
else {
pooling.x = pooling.x > tmp_flt2.x ? pooling.x : tmp_flt2.x;
pooling.y = pooling.y > tmp_flt2.y ? pooling.y : tmp_flt2.y;
}
}
}
T2 output_val;
if (IS_AVG_POOLING) {
output_val.x = T(pooling.x/kernel_size2);
output_val.y = T(pooling.y/kernel_size2);
}
else {
output_val.x = T(pooling.x);
output_val.y = T(pooling.y);
}
output[c_idx] = output_val;
}
}
/**
* output [N, 1, 1, C]
* input [N, H, W, C]
* grid(C, N)
* block(block_size) -- each block reduces the H*W elements of one (n, c) pair; each thread deals with H*W/block_size of them;
*/
template<typename T, bool IS_AVG_POOLING>
__global__ void pooling_nxhTo1x1_element1_kernel(
T* output, const T* input, const int N, const int HW, const int C)
{
const int c_idx = blockIdx.x;
const int n_idx = blockIdx.y;
float pooling[1];
if (IS_AVG_POOLING) {
pooling[0] = 0.0f;
}
else {
pooling[0] = -FLT_MAX;
}
const size_t input_offset = n_idx * HW * C + c_idx;
input += input_offset;
const size_t output_offset = n_idx * C + c_idx;
output += output_offset;
int tid = threadIdx.x;
for (int index = tid; index < HW; index += blockDim.x) {
float val = static_cast<float>(input[index * C]);
if (IS_AVG_POOLING) {
pooling[0] += val;
}
else {
pooling[0] = pooling[0] > val ? pooling[0] : val;
}
}
if (blockDim.x <= 32) {
if (IS_AVG_POOLING) {
warpReduceSum<float, 1>(pooling);
}
else {
warpReduceMax<float, 1>(pooling);
}
}
else {
if (IS_AVG_POOLING) {
blockReduceSum<float, 1>(pooling);
}
else {
blockReduceMax<float, 1>(pooling);
}
}
__syncthreads();
if (threadIdx.x == 0) {
T output_val;
if (IS_AVG_POOLING) {
output_val = T(pooling[0] / HW);
}
else {
output_val = T(pooling[0]);
}
output[0] = output_val;
}
}
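// e.g., global pooling of an N=16, H=W=7, C=129 tensor is launched by the host
// code below with grid(129, 16) and block.x rounded from 7*7=49 up to 64.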
/**
* output [N, 1, 1, C]
* input [N, H, W, C]
* grid(C/2, N)
* block(block_size) -- each thread deals with H*W/block_size * 2 elements;
*/
template<typename T2, typename T, bool IS_AVG_POOLING>
__global__ void pooling_nxhTo1x1_element2_kernel(
T2* output, const T2* input, const int N, const int HW, const int C)
{
const int c_idx = blockIdx.x;
const int n_idx = blockIdx.y;
float pooling[2];
if (IS_AVG_POOLING) {
pooling[0] = pooling[1] = 0.0f;
}
else {
pooling[0] = pooling[1] = -FLT_MAX;
}
const int C_2 = C / 2;
const size_t input_offset = n_idx * HW * C_2 + c_idx;
input += input_offset;
const size_t output_offset = n_idx * C_2 + c_idx;
output += output_offset;
int tid = threadIdx.x;
for (int index = tid; index < HW; index += blockDim.x) {
T2 val = input[index * C_2];
float2 val_flt2 = {static_cast<float>(val.x), static_cast<float>(val.y)};
if (IS_AVG_POOLING) {
pooling[0] += val_flt2.x;
pooling[1] += val_flt2.y;
}
else {
pooling[0] = pooling[0] > val_flt2.x ? pooling[0] : val_flt2.x;
pooling[1] = pooling[1] > val_flt2.y ? pooling[1] : val_flt2.y;
}
}
if (blockDim.x <= 32) {
if (IS_AVG_POOLING) {
warpReduceSum<float, 2>(pooling);
}
else {
warpReduceMax<float, 2>(pooling);
}
}
else {
if (IS_AVG_POOLING) {
blockReduceSum<float, 2>(pooling);
}
else {
blockReduceMax<float, 2>(pooling);
}
}
__syncthreads();
if (threadIdx.x == 0) {
T2 output_val;
if (IS_AVG_POOLING) {
output_val.x = T(pooling[0] / HW);
output_val.y = T(pooling[1] / HW);
}
else {
output_val.x = T(pooling[0]);
output_val.y = T(pooling[1]);
}
output[0] = output_val;
}
}
template <typename T>
void pooling_nhwc(cutlass::Tensor4DCoord input_tensor_size,
cutlass::Tensor4DCoord filter_tensor_size,
cutlass::Tensor4DCoord output_tensor_size,
cutlass::Tensor4DCoord padding,
cutlass::MatrixCoord stride,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNHWC> ref_output,
int poolingType, //0 for avg pooling ; 1 for max pooling
cudaStream_t stream) {
assert(input_tensor_size.n() == output_tensor_size.n() &&
input_tensor_size.c() == output_tensor_size.c());
assert(filter_tensor_size.h() == stride.row() &&
filter_tensor_size.w() == stride.column());
const int N = input_tensor_size.n();
const int H = input_tensor_size.h();
const int W = input_tensor_size.w();
const int C = input_tensor_size.c();
const int padding_H = padding.h();
const int padding_W = padding.w();
const int kernel_H = filter_tensor_size.h();
const int kernel_W = filter_tensor_size.w();
const int stride_H = stride.row();
const int stride_W = stride.column();
const int output_H = getOutputSize(H, padding_H, kernel_H, stride_H);
const int output_W = getOutputSize(W, padding_W, kernel_W, stride_W);
assert(output_tensor_size.h() == output_H &&
output_tensor_size.w() == output_W);
if (C % 2 != 0) {
if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) {
dim3 grid(C, N);
dim3 block(256);
if (H*W < block.x){
block.x = (H*W + 31)/32*32;
}
if (poolingType == 0) {
pooling_nxhTo1x1_element1_kernel<T, true><<<grid, block, 0, stream>>>(
ref_output.data(),
ref_input.data(),
N,
H*W,
C);
} // if (poolingType == 0)
else {
pooling_nxhTo1x1_element1_kernel<T, false><<<grid, block, 0, stream>>>(
ref_output.data(),
ref_input.data(),
N,
H*W,
C);
}
} // if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0))
else {
dim3 grid(N, output_H, output_W);
dim3 block(256);
if (C < block.x) {
block.x = C;
}
if (poolingType == 0) {
pooling_nhwc_element1_kernel<T, true><<<grid, block, 0, stream>>>(
ref_output.data(),
ref_input.data(),
N,
H,
W,
C,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
} // if (poolingType == 0)
else {
pooling_nhwc_element1_kernel<T, false><<<grid, block, 0, stream>>>(
ref_output.data(),
ref_input.data(),
N,
H,
W,
C,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
}
}
} // if (C % 2 != 0)
else {
if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) {
dim3 grid(C/2, N);
dim3 block(256);
if (H*W < block.x){
block.x = (H*W + 31)/32*32;
}
if (poolingType == 0) {
if (std::is_same<T, float>::value) {
pooling_nxhTo1x1_element2_kernel<float2, float, true><<<grid, block, 0, stream>>>(
(float2*)(ref_output.data()),
(const float2*)(ref_input.data()),
N,
H*W,
C);
} // if (std::is_same<T, float>::value)
else {
pooling_nxhTo1x1_element2_kernel<half2, half, true><<<grid, block, 0, stream>>>(
(half2*)(ref_output.data()),
(const half2*)(ref_input.data()),
N,
H*W,
C);
}
} // if (poolingType == 0)
else {
if (std::is_same<T, float>::value) {
pooling_nxhTo1x1_element2_kernel<float2, float, false><<<grid, block, 0, stream>>>(
(float2*)(ref_output.data()),
(const float2*)(ref_input.data()),
N,
H*W,
C);
} // if (std::is_same<T, float>::value)
else {
pooling_nxhTo1x1_element2_kernel<half2, half, false><<<grid, block, 0, stream>>>(
(half2*)(ref_output.data()),
(const half2*)(ref_input.data()),
N,
H*W,
C);
}
}
} // if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0))
else {
dim3 grid(N, output_H, output_W);
dim3 block(256);
if (C/2 < block.x) {
block.x = C/2;
}
if (poolingType == 0) {
if (std::is_same<T, float>::value) {
pooling_nhwc_element2_kernel<float2, float, true><<<grid, block, 0, stream>>>(
(float2*)(ref_output.data()),
(const float2*)(ref_input.data()),
N,
H,
W,
C/2,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
} // if (std::is_same<T, float>::value)
else {
pooling_nhwc_element2_kernel<half2, half, true><<<grid, block, 0, stream>>>(
(half2*)(ref_output.data()),
(const half2*)(ref_input.data()),
N,
H,
W,
C/2,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
}
} // if (poolingType == 0)
else {
if (std::is_same<T, float>::value) {
pooling_nhwc_element2_kernel<float2, float, false><<<grid, block, 0, stream>>>(
(float2*)(ref_output.data()),
(const float2*)(ref_input.data()),
N,
H,
W,
C/2,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
} // if (std::is_same<T, float>::value)
else {
pooling_nhwc_element2_kernel<half2, half, false><<<grid, block, 0, stream>>>(
(half2*)(ref_output.data()),
(const half2*)(ref_input.data()),
N,
H,
W,
C/2,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
}
}
}
}
}
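// Example (illustrative sketch, not part of the library): reducing an NHWC fp32
// activation to a 1x1 spatial extent with average pooling. The tensor sizes and
// the HostTensor allocations (cutlass/util/host_tensor.h) are assumptions made
// for the sake of the example; data must be resident on the device before the
// launch (e.g. via HostTensor::sync_device()).
//
//   cutlass::Tensor4DCoord input_size(1, 7, 7, 64);    // N, H, W, C
//   cutlass::Tensor4DCoord filter_size(1, 7, 7, 1);    // pooling window covers all of H x W
//   cutlass::Tensor4DCoord output_size(1, 1, 1, 64);
//   cutlass::Tensor4DCoord padding(0, 0, 0, 0);
//   cutlass::MatrixCoord   stride(7, 7);               // must equal the window extent
//
//   cutlass::HostTensor<float, cutlass::layout::TensorNHWC> input(input_size);
//   cutlass::HostTensor<float, cutlass::layout::TensorNHWC> output(output_size);
//
//   pooling_nhwc<float>(input_size, filter_size, output_size, padding, stride,
//                       input.device_ref(), output.device_ref(),
//                       /*poolingType=*/0, /*stream=*/nullptr);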
} //namespace cutlass
| tools/util/include/cutlass/util/device_nhwc_pooling.h/0 | {
"file_path": "tools/util/include/cutlass/util/device_nhwc_pooling.h",
"repo_id": "tools",
"token_count": 9927
} | 53 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for CONV in host-side code.
*/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/complex.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cute/tensor.hpp"
#include <cuda_runtime.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::reference::host {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template<class EngineAct, class LayoutAct>
bool
is_activation_in_bounds(
cute::Tensor<EngineAct, LayoutAct> const& activation,
int32_t n_, int32_t d_, int32_t h_, int32_t w_, int32_t c_) {
return ((n_ >= 0 && n_ < size<4>(activation)) &&
(d_ >= 0 && d_ < size<3>(activation)) &&
(h_ >= 0 && h_ < size<2>(activation)) &&
(w_ >= 0 && w_ < size<1>(activation)) &&
(c_ >= 0 && c_ < size<0>(activation)));
}
template<class EngineAct, class LayoutAct>
bool
is_activation_in_bounds(
cute::Tensor<EngineAct, LayoutAct> const& activation,
int32_t n_, int32_t h_, int32_t w_, int32_t c_) {
return ((n_ >= 0 && n_ < size<3>(activation)) &&
(h_ >= 0 && h_ < size<2>(activation)) &&
(w_ >= 0 && w_ < size<1>(activation)) &&
(c_ >= 0 && c_ < size<0>(activation)));
}
template<class EngineAct, class LayoutAct>
bool
is_activation_in_bounds(
cute::Tensor<EngineAct, LayoutAct> const& activation,
int32_t n_, int32_t w_, int32_t c_) {
return ((n_ >= 0 && n_ < size<2>(activation)) &&
(w_ >= 0 && w_ < size<1>(activation)) &&
(c_ >= 0 && c_ < size<0>(activation)));
}
} // namespace detail
template<
class ElementAcc_,
class ElementScalar_,
class ElementCompute_,
class ElementC_,
class ElementOut_,
class TensorAlpha_,
class TensorBeta_,
class TensorBias_,
class ActivationFunctor_ = cutlass::epilogue::thread::Identity<ElementCompute_>
>
struct ConvEpilogueFusionParams {
using ElementAcc = ElementAcc_;
using ElementScalar = ElementScalar_;
using ElementCompute = ElementCompute_;
using ElementC = ElementC_;
using ElementOut = ElementOut_;
using TensorAlpha = TensorAlpha_;
using TensorBeta = TensorBeta_;
using TensorBias = TensorBias_;
using ActivationFunctor = ActivationFunctor_;
ElementScalar alpha = ElementScalar(1);
ElementScalar beta = ElementScalar(0);
TensorAlpha tensor_alpha{};
TensorBeta tensor_beta{};
TensorBias tensor_bias{};
};
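// The parameters above drive the epilogue that ConvReferenceImpl (below) applies
// to every output element, computed in ElementCompute as
//
//   out = activation(alpha * accumulator + beta * C + bias)
//
// where alpha and beta come from the scalar members unless the corresponding
// tensor_alpha / tensor_beta views are non-null, in which case they are read per
// output channel, and bias is added only when tensor_bias is non-null.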
template<
cutlass::conv::Operator ConvOp,
int NumSpatialDims,
class TensorA,
class TensorB,
class TensorC,
class TensorD,
class ShapePadding,
class StrideTraversal,
class ShapeDilation,
class EpilogueFusionParams
>
struct ConvReferenceImpl {
using ElementAcc = typename EpilogueFusionParams::ElementAcc;
using ElementC = typename EpilogueFusionParams::ElementC;
using ElementOut = typename EpilogueFusionParams::ElementOut;
using ElementScalar = typename EpilogueFusionParams::ElementScalar;
using ElementCompute = typename EpilogueFusionParams::ElementCompute;
using ElementBias = typename EpilogueFusionParams::TensorBias::value_type;
using ActivationFunctor = typename EpilogueFusionParams::ActivationFunctor;
// Input related converter
NumericConverter<ElementCompute, ElementAcc> acc_converter;
NumericConverter<ElementCompute, ElementC> residual_converter;
NumericConverter<ElementCompute, ElementBias> bias_converter;
// Scale related converter
NumericConverter<ElementCompute, ElementScalar> scale_converter;
// Output related converter
NumericConverter<ElementOut, ElementCompute> output_converter;
EpilogueFusionParams& epi_fusion_params_;
TensorA const& tensor_a_;
TensorB const& tensor_b_;
TensorC const& tensor_c_;
TensorD& tensor_d_;
ShapePadding const& padding_;
StrideTraversal const& tstride_;
ShapeDilation const& dilation_;
// Epilogue activation operation
ActivationFunctor epi_activation;
ConvReferenceImpl(
TensorA const& tensor_a,
TensorB const& tensor_b,
TensorC const& tensor_c,
TensorD& tensor_d,
ShapePadding const& padding,
StrideTraversal const& tstride,
ShapeDilation const& dilation,
EpilogueFusionParams& epi_fusion_params)
: tensor_a_(tensor_a),
tensor_b_(tensor_b),
tensor_c_(tensor_c),
tensor_d_(tensor_d),
padding_(padding),
tstride_(tstride),
dilation_(dilation),
epi_fusion_params_(epi_fusion_params)
{
static_assert(rank(ShapePadding{}) == rank(ShapeDilation{}));
static_assert(rank(ShapePadding{}) == rank(StrideTraversal{}));
}
void compute_reference() {
if constexpr (ConvOp == cutlass::conv::Operator::kFprop) {
fprop_reference(cute::Int<NumSpatialDims>{});
}
else if constexpr (ConvOp == cutlass::conv::Operator::kDgrad) {
dgrad_reference(cute::Int<NumSpatialDims>{});
}
else {
wgrad_reference(cute::Int<NumSpatialDims>{});
}
}
private:
// Specialization for 1D fprop kernel
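  // For each output position q and filter tap s, the contributing input coordinate
  // is w = q * stride - padding + s * dilation; taps whose coordinate falls outside
  // the activation extent are skipped, which implements implicit zero padding.
  // The 2D and 3D specializations below apply the same mapping per spatial dimension.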
void fprop_reference(cute::Int<1> spatial_dims) {
int32_t N = size<2>(tensor_d_);
int32_t Q = size<1>(tensor_d_);
int32_t K = size<0>(tensor_d_);
int32_t S = size<1>(tensor_b_);
int32_t C = size<0>(tensor_b_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(2)
#endif
for (int32_t n = 0; n < N; ++n) {
for (int32_t q = 0; q < Q; ++q) {
for (int32_t k = 0; k < K; ++k) {
auto accumulator = ElementAcc(0);
for (int32_t s = 0; s < S; ++s) {
for (int32_t c = 0; c < C; ++c) {
int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_);
if (detail::is_activation_in_bounds(tensor_a_, n, w, c)) {
auto a = tensor_a_(c, w, n);
auto b = tensor_b_(c, s, k);
accumulator += ElementAcc(a * b);
}
}
}
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ?
epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ?
epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta;
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(k, q, n));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[k]);
}
output = epi_activation(output);
tensor_d_(k, q, n) = output_converter(output);
}
}
}
}
// Specialization for 2D fprop kernel
void fprop_reference(cute::Int<2> spatial_dims) {
int32_t N = size<3>(tensor_d_);
int32_t P = size<2>(tensor_d_);
int32_t Q = size<1>(tensor_d_);
int32_t K = size<0>(tensor_d_);
int32_t R = size<2>(tensor_b_);
int32_t S = size<1>(tensor_b_);
int32_t C = size<0>(tensor_b_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(3)
#endif
for (int32_t n = 0; n < N; ++n) {
for (int32_t p = 0; p < P; ++p) {
for (int32_t q = 0; q < Q; ++q) {
for (int32_t k = 0; k < K; ++k) {
auto accumulator = ElementAcc(0);
for (int32_t r = 0; r < R; ++r) {
for (int32_t s = 0; s < S; ++s) {
for (int32_t c = 0; c < C; ++c) {
int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_);
int32_t h = p * cute::get<1>(tstride_) - cute::get<1>(padding_) + r * cute::get<1>(dilation_);
if (detail::is_activation_in_bounds(tensor_a_, n, h, w, c)) {
auto a = tensor_a_(c, w, h, n);
auto b = tensor_b_(c, s, r, k);
accumulator += ElementAcc(a * b);
}
}
}
}
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ?
epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ?
epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta;
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(k, q, p, n));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[k]);
}
output = epi_activation(output);
tensor_d_(k, q, p, n) = output_converter(output);
}
}
}
}
}
// Specialization for 3D fprop kernel
void fprop_reference(cute::Int<3> spatial_dims) {
int32_t N = size<4>(tensor_d_);
int32_t Z = size<3>(tensor_d_);
int32_t P = size<2>(tensor_d_);
int32_t Q = size<1>(tensor_d_);
int32_t K = size<0>(tensor_d_);
int32_t T = size<3>(tensor_b_);
int32_t R = size<2>(tensor_b_);
int32_t S = size<1>(tensor_b_);
int32_t C = size<0>(tensor_b_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(3)
#endif
for (int32_t n = 0; n < N; ++n) {
for (int32_t z = 0; z < Z; ++z) {
for (int32_t p = 0; p < P; ++p) {
for (int32_t q = 0; q < Q; ++q) {
for (int32_t k = 0; k < K; ++k) {
auto accumulator = ElementAcc(0);
for (int32_t t = 0; t < T; ++t) {
for (int32_t r = 0; r < R; ++r) {
for (int32_t s = 0; s < S; ++s) {
for (int32_t c = 0; c < C; ++c) {
int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_);
int32_t h = p * cute::get<1>(tstride_) - cute::get<1>(padding_) + r * cute::get<1>(dilation_);
int32_t d = z * cute::get<2>(tstride_) - cute::get<2>(padding_) + t * cute::get<2>(dilation_);
if (detail::is_activation_in_bounds(tensor_a_, n, d, h, w, c)) {
auto a = tensor_a_(c, w, h, d, n);
auto b = tensor_b_(c, s, r, t, k);
accumulator += ElementAcc(a * b);
}
}
}
}
}
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ?
epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ?
epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta;
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(k, q, p, z, n));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[k]);
}
output = epi_activation(output);
tensor_d_(k, q, p, z, n) = output_converter(output);
}
}
}
}
}
}
// Specialization for 1D dgrad kernel
void dgrad_reference(cute::Int<1> spatial_dims) {
int32_t N = size<2>(tensor_d_);
int32_t W = size<1>(tensor_d_);
int32_t C = size<0>(tensor_d_);
int32_t K = size<2>(tensor_b_);
int32_t S = size<1>(tensor_b_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(2)
#endif
for (int32_t n = 0; n < N; ++n) {
for (int32_t w = 0; w < W; ++w) {
for (int32_t c = 0; c < C; ++c) {
auto accumulator = ElementAcc(0);
for (int32_t k = 0; k < K; ++k) {
for (int32_t s = 0; s < S; ++s) {
int32_t q = w + cute::get<0>(padding_) - s * cute::get<0>(dilation_);
if (q % cute::get<0>(tstride_) == 0) {
q /= cute::get<0>(tstride_);
} else {
continue;
}
if (detail::is_activation_in_bounds(tensor_a_, n, q, k)) {
accumulator += ElementAcc(tensor_a_(k, q, n) * tensor_b_(c, s, k));
}
}
}
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data())
? epi_fusion_params_.tensor_alpha[c] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data())
? epi_fusion_params_.tensor_beta[c] : epi_fusion_params_.beta;
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(c, w, n));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[c]);
}
output = epi_activation(output);
tensor_d_(c, w, n) = output_converter(output);
}
}
}
}
// Specialization for 2D dgrad kernel
void dgrad_reference(cute::Int<2> spatial_dims) {
int32_t N = size<3>(tensor_d_);
int32_t H = size<2>(tensor_d_);
int32_t W = size<1>(tensor_d_);
int32_t C = size<0>(tensor_d_);
int32_t K = size<3>(tensor_b_);
int32_t R = size<2>(tensor_b_);
int32_t S = size<1>(tensor_b_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(3)
#endif
for (int32_t n = 0; n < N; ++n) {
for (int32_t h = 0; h < H; ++h) {
for (int32_t w = 0; w < W; ++w) {
for (int32_t c = 0; c < C; ++c) {
auto accumulator = ElementAcc(0);
for (int32_t k = 0; k < K; ++k) {
for (int32_t r = 0; r < R; ++r) {
for (int32_t s = 0; s < S; ++s) {
int32_t q = w + cute::get<0>(padding_) - s * cute::get<0>(dilation_);
int32_t p = h + cute::get<1>(padding_) - r * cute::get<1>(dilation_);
if (q % cute::get<0>(tstride_) == 0) {
q /= cute::get<0>(tstride_);
} else {
continue;
}
if (p % cute::get<1>(tstride_) == 0) {
p /= cute::get<1>(tstride_);
} else {
continue;
}
if (detail::is_activation_in_bounds(tensor_a_, n, p, q, k)) {
accumulator += ElementAcc(tensor_a_(k, q, p, n) * tensor_b_(c, s, r, k));
}
}
}
}
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data())
? epi_fusion_params_.tensor_alpha[c] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data())
? epi_fusion_params_.tensor_beta[c] : epi_fusion_params_.beta;
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(c, w, h, n));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[c]);
}
output = epi_activation(output);
tensor_d_(c, w, h, n) = output_converter(output);
}
}
}
}
}
// Specialization for 3D dgrad kernel
void dgrad_reference(cute::Int<3> spatial_dims) {
int32_t N = size<4>(tensor_d_);
int32_t D = size<3>(tensor_d_);
int32_t H = size<2>(tensor_d_);
int32_t W = size<1>(tensor_d_);
int32_t C = size<0>(tensor_d_);
int32_t K = size<4>(tensor_b_);
int32_t T = size<3>(tensor_b_);
int32_t R = size<2>(tensor_b_);
int32_t S = size<1>(tensor_b_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(3)
#endif
for (int32_t n = 0; n < N; ++n) {
for (int32_t d = 0; d < D; ++d) {
for (int32_t h = 0; h < H; ++h) {
for (int32_t w = 0; w < W; ++w) {
for (int32_t c = 0; c < C; ++c) {
auto accumulator = ElementAcc(0);
for (int32_t k = 0; k < K; ++k) {
for (int32_t t = 0; t < T; ++t) {
for (int32_t r = 0; r < R; ++r) {
for (int32_t s = 0; s < S; ++s) {
int32_t q = w + cute::get<0>(padding_) - s * cute::get<0>(dilation_);
int32_t p = h + cute::get<1>(padding_) - r * cute::get<1>(dilation_);
int32_t z = d + cute::get<2>(padding_) - t * cute::get<2>(dilation_);
if (q % cute::get<0>(tstride_) == 0) {
q /= cute::get<0>(tstride_);
} else {
continue;
}
if (p % cute::get<1>(tstride_) == 0) {
p /= cute::get<1>(tstride_);
} else {
continue;
}
if (z % cute::get<2>(tstride_) == 0) {
z /= cute::get<2>(tstride_);
} else {
continue;
}
if (detail::is_activation_in_bounds(tensor_a_, n, z, p, q, k)) {
accumulator += ElementAcc(tensor_a_(k, q, p, z, n) * tensor_b_(c, s, r, t, k));
}
}
}
}
}
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data())
? epi_fusion_params_.tensor_alpha[c] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data())
? epi_fusion_params_.tensor_beta[c] : epi_fusion_params_.beta;
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(c, w, h, d, n));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[c]);
}
output = epi_activation(output);
tensor_d_(c, w, h, d, n) = output_converter(output);
}
}
}
}
}
}
// Specialization for 1D wgrad kernel
void wgrad_reference(cute::Int<1> spatial_dims) {
    int32_t N = size<2>(tensor_a_);
    int32_t Q = size<1>(tensor_a_);
    int32_t K = size<0>(tensor_a_);
int32_t S = size<1>(tensor_d_);
int32_t C = size<0>(tensor_d_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(2)
#endif
for (int32_t k = 0; k < K; ++k) {
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ?
epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ?
epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta;
for (int32_t s = 0; s < S; ++s) {
for (int32_t c = 0; c < C; ++c) {
auto accumulator = ElementAcc(0);
for (int32_t n = 0; n < N; ++n) {
for (int32_t q = 0; q < Q; ++q) {
int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_);
            bool is_in_bounds = detail::is_activation_in_bounds(tensor_b_, n, w, c);
            if (is_in_bounds) {
              auto act = tensor_b_(c, w, n);
              auto xformed_act = tensor_a_(k, q, n);
accumulator += ElementAcc(act * xformed_act);
}
}
}
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(c, s, k));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[k]);
}
output = epi_activation(output);
tensor_d_(c, s, k) = output_converter(output);
}
}
}
}
// Specialization for 2D wgrad kernel
void wgrad_reference(cute::Int<2> spatial_dims) {
    int32_t N = size<3>(tensor_a_);
    int32_t P = size<2>(tensor_a_);
    int32_t Q = size<1>(tensor_a_);
    int32_t K = size<0>(tensor_a_);
int32_t R = size<2>(tensor_d_);
int32_t S = size<1>(tensor_d_);
int32_t C = size<0>(tensor_d_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(3)
#endif
for (int32_t k = 0; k < K; ++k) {
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ?
epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ?
epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta;
for (int32_t r = 0; r < R; ++r) {
for (int32_t s = 0; s < S; ++s) {
for (int32_t c = 0; c < C; ++c) {
auto accumulator = ElementAcc(0);
for (int32_t n = 0; n < N; ++n) {
for (int32_t p = 0; p < P; ++p) {
for (int32_t q = 0; q < Q; ++q) {
int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_);
int32_t h = p * cute::get<1>(tstride_) - cute::get<1>(padding_) + r * cute::get<1>(dilation_);
                bool is_in_bounds = detail::is_activation_in_bounds(tensor_b_, n, h, w, c);
                if (is_in_bounds) {
                  auto act = tensor_b_(c, w, h, n);
                  auto xformed_act = tensor_a_(k, q, p, n);
accumulator += ElementAcc(act * xformed_act);
}
}
}
}
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(c, s, r, k));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[k]);
}
output = epi_activation(output);
tensor_d_(c, s, r, k) = output_converter(output);
}
}
}
}
}
// Specialization for 3D wgrad kernel
void wgrad_reference(cute::Int<3> spatial_dims) {
    int32_t N = size<4>(tensor_a_);
    int32_t Z = size<3>(tensor_a_);
    int32_t P = size<2>(tensor_a_);
    int32_t Q = size<1>(tensor_a_);
    int32_t K = size<0>(tensor_a_);
int32_t T = size<3>(tensor_d_);
int32_t R = size<2>(tensor_d_);
int32_t S = size<1>(tensor_d_);
int32_t C = size<0>(tensor_d_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(3)
#endif
for (int32_t k = 0; k < K; ++k) {
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ?
epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ?
epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta;
for (int32_t t = 0; t < T; ++t) {
for (int32_t r = 0; r < R; ++r) {
for (int32_t s = 0; s < S; ++s) {
for (int32_t c = 0; c < C; ++c) {
auto accumulator = ElementAcc(0);
for (int32_t n = 0; n < N; ++n) {
for (int32_t z = 0; z < Z; ++z) {
for (int32_t p = 0; p < P; ++p) {
for (int32_t q = 0; q < Q; ++q) {
int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_);
int32_t h = p * cute::get<1>(tstride_) - cute::get<1>(padding_) + r * cute::get<1>(dilation_);
int32_t d = z * cute::get<2>(tstride_) - cute::get<2>(padding_) + t * cute::get<2>(dilation_);
                    bool is_in_bounds = detail::is_activation_in_bounds(tensor_b_, n, d, h, w, c);
                    if (is_in_bounds) {
                      auto act = tensor_b_(c, w, h, d, n);
                      auto xformed_act = tensor_a_(k, q, p, z, n);
accumulator += ElementAcc(act * xformed_act);
}
}
}
}
}
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(c, s, r, t, k));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[k]);
}
output = epi_activation(output);
tensor_d_(c, s, r, t, k) = output_converter(output);
}
}
}
}
}
}
};
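// Example (illustrative sketch): running the 2D fprop reference on host data.
// The cute tensor construction, element types, and problem shape below are
// assumptions made for the sake of the example.
//
//   auto mA = cute::make_tensor(ptr_A, cute::make_shape(C, W, H, N));  // activation
//   auto mB = cute::make_tensor(ptr_B, cute::make_shape(C, S, R, K));  // filter
//   auto mC = cute::make_tensor(ptr_C, cute::make_shape(K, Q, P, N));  // source
//   auto mD = cute::make_tensor(ptr_D, cute::make_shape(K, Q, P, N));  // destination
//
//   auto padding  = cute::make_shape(0, 0);
//   auto tstride  = cute::make_shape(1, 1);
//   auto dilation = cute::make_shape(1, 1);
//
//   using Fusion = ConvEpilogueFusionParams<float, float, float, float, float,
//                                           decltype(mD), decltype(mD), decltype(mD)>;
//   Fusion fusion;   // alpha = 1, beta = 0, no per-channel vectors
//
//   ConvReferenceImpl<cutlass::conv::Operator::kFprop, 2,
//                     decltype(mA), decltype(mB), decltype(mC), decltype(mD),
//                     decltype(padding), decltype(tstride), decltype(dilation), Fusion>
//     reference(mA, mB, mC, mD, padding, tstride, dilation, fusion);
//   reference.compute_reference();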
/////////////////////////////////////////////////////////////////////////////////////////////////
} // cutlass::reference::host
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/util/include/cutlass/util/reference/host/conv.hpp/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/conv.hpp",
"repo_id": "tools",
"token_count": 14121
} | 54 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Provides several functions for filling tensors with data.
*/
#pragma once
// Standard Library includes
#include <utility>
#include <cstdlib>
#include <cmath>
#include <random>
#include <stdexcept>
// Cutlass includes
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/quaternion.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/subbyte_reference.h"
#include "cutlass/tensor_view.h"
#include "cutlass/tensor_view_planar_complex.h"
#include "cutlass/blas3.h"
#include "cutlass/util/distribution.h"
#include "tensor_foreach.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reference {
namespace host {
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
Element value;
//
// Methods
//
TensorFillFunc(
TensorView const &view_ = TensorView(),
Element value_ = Element(0)
): view(view_), value(value_) { }
void operator()(Coord<Layout::kRank> const & coord) const {
view.at(coord) = value;
}
};
/// Returns a pair of values of the Gaussian distribution generated by the Box Muller method
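/// Given independent uniform samples u1, u2 in (0, 1], the transform
///   z0 = sqrt(-2 ln u1) * cos(2 pi u2),  z1 = sqrt(-2 ln u1) * sin(2 pi u2)
/// yields two independent standard normal samples, which are then shifted and
/// scaled to mean + stddev * z.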
struct BoxMullerFunc {
BoxMullerFunc() {}
void operator()(
double* rnd, ///< Size-2 vector to be filled with random values
double mean = 0, ///< Mean of the Gaussian distribution
double stddev = 1, ///< Standard deviation of the Gaussian distribution
double pi = std::acos(-1)) const {
double u1 = double(std::rand()) / double(RAND_MAX);
double u2 = double(std::rand()) / double(RAND_MAX);
rnd[0] = std::sqrt(-2 * std::log(u1)) * std::cos(2 * pi * u2);
rnd[1] = std::sqrt(-2 * std::log(u1)) * std::sin(2 * pi * u2);
rnd[0] = mean + stddev * rnd[0];
rnd[1] = mean + stddev * rnd[1];
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with a uniform value
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFill(
TensorView<Element, Layout> dst, ///< destination tensor
Element val = Element(0)) { ///< value to uniformly fill it with
detail::TensorFillFunc<Element, Layout> func(dst, val);
TensorForEach(
dst.extent(),
func
);
}
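// Example (illustrative sketch; the HostTensor allocation is an assumption made
// for the sake of the example):
//
//   cutlass::HostTensor<float, cutlass::layout::RowMajor> tensor({128, 128});
//   cutlass::reference::host::TensorFill(tensor.host_view(), 1.5f);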
/// Fills a tensor with a uniform value
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFill(
TensorViewPlanarComplex<Element, Layout> dst, ///< destination tensor
cutlass::complex<Element> val = cutlass::complex<Element>(0)) { ///< value to uniformly fill it with
TensorFill(dst.view_real(), val.real());
TensorFill(dst.view_imag(), val.imag());
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element>
struct RandomGaussianFunc {
uint64_t seed;
double mean;
double stddev;
int int_scale;
double pi;
double pnz;
//
// Methods
//
RandomGaussianFunc(
uint64_t seed_ = 0,
double mean_ = 0,
double stddev_ = 1,
int int_scale_ = -1,
double pnz_ = 100.0
):
seed(seed_), mean(mean_), stddev(stddev_), int_scale(int_scale_), pi(std::acos(-1)), pnz(pnz_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
Element operator()() const {
// Box-Muller transform to generate random numbers with Normal distribution
double u1 = double(std::rand()) / double(RAND_MAX);
double u2 = double(std::rand()) / double(RAND_MAX);
// Compute Gaussian random value
double rnd = std::sqrt(-2 * std::log(u1)) * std::cos(2 * pi * u2);
rnd = mean + stddev * rnd;
// Scale and convert final result
Element result;
// Sample from the Bernoulli distribution, and use the result to sample from the Gaussian
std::random_device rnd_device;
std::mt19937 bernoulli_rnd(rnd_device());
std::bernoulli_distribution bernoulli_dist(pnz / 100);
bool bernoulli_result = bernoulli_dist(bernoulli_rnd);
// Sample from the Gaussian distribution for a nonzero element
if (bernoulli_result) {
if (int_scale >= 0) {
rnd = double(std::llround(rnd * double(1 << int_scale))) / double(1 << int_scale);
result = static_cast<Element>(rnd);
}
else {
result = static_cast<Element>(rnd);
}
}
else {
result = static_cast<Element>(0);
}
return result;
}
};
/// Partial specialization for initializing a complex value.
template <typename Element>
struct RandomGaussianFunc<complex<Element> > {
uint64_t seed;
double mean;
double stddev;
int int_scale;
double pi;
double pnz;
//
// Methods
//
RandomGaussianFunc(
uint64_t seed_ = 0,
double mean_ = 0,
double stddev_ = 1,
int int_scale_ = -1,
double pnz_ = 100.0
):
seed(seed_), mean(mean_), stddev(stddev_), int_scale(int_scale_), pi(std::acos(-1)), pnz(pnz_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
complex<Element> operator()() const {
Element reals[2];
double rnd[2];
detail::BoxMullerFunc func;
func(rnd, mean, stddev, pi);
// Sample from the Bernoulli distribution, and use the result to sample from the Gaussian
std::random_device rnd_device;
std::mt19937 bernoulli_rnd(rnd_device());
std::bernoulli_distribution bernoulli_dist(pnz / 100);
bool bernoulli_result = bernoulli_dist(bernoulli_rnd);
// Sample from the Gaussian distribution for a nonzero element
if (bernoulli_result) {
if (int_scale >= 0) {
rnd[0] = double(int(rnd[0] * double(1 << int_scale)));
rnd[1] = double(int(rnd[1] * double(1 << int_scale)));
reals[0] = from_real<Element>(rnd[0] / double(1 << int_scale));
reals[1] = from_real<Element>(rnd[1] / double(1 << int_scale));
}
else {
reals[0] = from_real<Element>(rnd[0]);
reals[1] = from_real<Element>(rnd[1]);
}
}
else {
reals[0] = from_real<Element>(0);
reals[1] = from_real<Element>(0);
}
return complex<Element>(reals[0], reals[1]);
}
};
/// Partial specialization for initializing a complex value.
template <typename Element>
struct RandomGaussianFunc<Quaternion<Element> > {
uint64_t seed;
double mean;
double stddev;
int int_scale;
double pi;
double pnz;
//
// Methods
//
RandomGaussianFunc(
uint64_t seed_ = 0,
double mean_ = 0,
double stddev_ = 1,
int int_scale_ = -1,
double pnz_ = 100.0
):
seed(seed_), mean(mean_), stddev(stddev_), int_scale(int_scale_), pi(std::acos(-1)), pnz(pnz_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
Quaternion<Element> operator()() const {
Element reals[4];
double rnd1[2];
double rnd2[2];
detail::BoxMullerFunc func;
func(rnd1, mean, stddev, pi);
func(rnd2, mean, stddev, pi);
// Sample from the Bernoulli distribution, and use the result to sample from the Gaussian
std::random_device rnd_device;
std::mt19937 bernoulli_rnd(rnd_device());
std::bernoulli_distribution bernoulli_dist(pnz / 100);
bool bernoulli_result = bernoulli_dist(bernoulli_rnd);
// Sample from the Gaussian distribution for a nonzero element
if (bernoulli_result) {
if (int_scale >= 0) {
rnd1[0] = double(int(rnd1[0] * double(1 << int_scale)));
rnd1[1] = double(int(rnd1[1] * double(1 << int_scale)));
rnd2[0] = double(int(rnd2[0] * double(1 << int_scale)));
rnd2[1] = double(int(rnd2[1] * double(1 << int_scale)));
reals[0] = from_real<Element>(rnd1[0] / double(1 << int_scale));
reals[1] = from_real<Element>(rnd1[1] / double(1 << int_scale));
reals[2] = from_real<Element>(rnd2[0] / double(1 << int_scale));
reals[3] = from_real<Element>(rnd2[1] / double(1 << int_scale));
}
else {
reals[0] = from_real<Element>(rnd1[0]);
reals[1] = from_real<Element>(rnd1[1]);
reals[2] = from_real<Element>(rnd2[0]);
reals[3] = from_real<Element>(rnd2[1]);
}
}
else {
reals[0] = from_real<Element>(0);
reals[1] = from_real<Element>(0);
reals[2] = from_real<Element>(0);
reals[3] = from_real<Element>(0);
}
return Quaternion<Element>(reals[0], reals[1], reals[2], reals[3]);
}
};
/// Computes a random Gaussian distribution
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillGaussianFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomGaussianFunc<Element> func;
//
// Methods
//
/// Construction of Gaussian RNG functor.
TensorFillGaussianFunc(
TensorView view_ = TensorView(),
RandomGaussianFunc<Element> func_ = RandomGaussianFunc<Element>()
):
view(view_), func(func_) {
}
/// Compute random value and update RNG state
void operator()(Coord<Layout::kRank> const &coord) const {
view.at(coord) = func();
}
};
/// Computes a random Gaussian distribution for a rank-2 tensor
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillSymmetricGaussianFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomGaussianFunc<Element> func;
cutlass::FillMode fill_mode;
//
// Methods
//
/// Construction of Gaussian RNG functor.
TensorFillSymmetricGaussianFunc(
TensorView view_ = TensorView(),
RandomGaussianFunc<Element> func_ = RandomGaussianFunc<Element>(),
cutlass::FillMode fill_mode_ = cutlass::FillMode::kInvalid
):
view(view_), func(func_), fill_mode(fill_mode_) {
}
/// Compute random value and update RNG state
void operator()(Coord<Layout::kRank> const &coord) const {
// Fill half of matrix based on FillMode
if (Layout::kRank == 2 &&
fill_mode == cutlass::FillMode::kLower &&
coord[0] >= coord[1]) {
view.at(coord) = func();
} else if (Layout::kRank == 2 &&
fill_mode == cutlass::FillMode::kUpper &&
coord[0] <= coord[1]) {
view.at(coord) = func();
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values with a Gaussian distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomGaussian(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
double mean = 0, ///< Gaussian distribution's mean
double stddev = 1, ///< Gaussian distribution's standard deviation
    int bits = -1,              ///< If non-negative, specifies number of fractional bits that
                                ///  are not truncated to zero. Permits reducing precision of data.
    double pnz = 100.0) {       ///< Percentage of elements that are sampled as nonzero.
detail::RandomGaussianFunc<Element> random_func(seed, mean, stddev, bits, pnz);
detail::TensorFillGaussianFunc<Element, Layout> func(
dst,
random_func
);
TensorForEach(
dst.extent(),
func
);
}
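// Example (illustrative sketch): filling a host tensor with samples from N(0, 2)
// rounded to 8 fractional bits. The HostTensor allocation is an assumption made
// for the sake of the example.
//
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> tensor({64, 64});
//   cutlass::reference::host::TensorFillRandomGaussian(
//       tensor.host_view(), /*seed=*/2024, /*mean=*/0.0, /*stddev=*/2.0, /*bits=*/8);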
/// Fills a tensor with random values with a Gaussian distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomGaussian(
TensorViewPlanarComplex<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
double mean = 0, ///< Gaussian distribution's mean
double stddev = 1, ///< Gaussian distribution's standard deviation
    int bits = -1,              ///< If non-negative, specifies number of fractional bits that
                                ///  are not truncated to zero. Permits reducing precision of data.
    double pnz = 100.0) {       ///< Percentage of elements that are sampled as nonzero.
TensorFillRandomGaussian(dst.view_real(), seed, mean, stddev, bits, pnz);
TensorFillRandomGaussian(dst.view_imag(), ~seed, mean, stddev, bits, pnz);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills the upper or lower part of a symmetric rank-2 tensor with random values of a Gaussian distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillSymmetricRandomGaussian(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
cutlass::FillMode fill_mode, ///< FillMode for symmetric matrices
double mean = 0, ///< Gaussian distribution's mean
double stddev = 1, ///< Gaussian distribution's standard deviation
    int bits = -1,              ///< If non-negative, specifies number of fractional bits that
                                ///  are not truncated to zero. Permits reducing precision of data.
    double pnz = 100.0) {       ///< Percentage of elements that are sampled as nonzero.
detail::RandomGaussianFunc<Element> random_func(seed, mean, stddev, bits, pnz);
detail::TensorFillSymmetricGaussianFunc<Element, Layout> func(
dst,
random_func,
fill_mode
);
TensorForEach(
dst.extent(),
func
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values of a Gaussian distribution.
template <
typename Element ///< Element type
>
void BlockFillRandomGaussian(
Element *ptr, ///< destination buffer
size_t capacity, ///< number of elements
uint64_t seed, ///< seed for RNG
double mean = 0, ///< Gaussian distribution's mean
double stddev = 1, ///< Gaussian distribution's standard deviation
    int bits = -1,              ///< If non-negative, specifies number of fractional bits that
                                ///  are not truncated to zero. Permits reducing precision of data.
    double pnz = 100.0) {       ///< Percentage of elements that are sampled as nonzero.
detail::RandomGaussianFunc<Element> random_func(seed, mean, stddev, bits, pnz);
for (size_t i = 0; i < capacity; ++i) {
ReferenceFactory<Element>::get(ptr, i) = random_func();
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element>
struct RandomUniformFunc {
using Real = typename RealType<Element>::Type;
uint64_t seed;
double range;
double min;
int int_scale;
//
// Methods
//
RandomUniformFunc(
uint64_t seed_ = 0,
double max = 1,
double min_ = 0,
int int_scale_ = -1
):
seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
Element operator()() const {
double rnd = double(std::rand()) / double(RAND_MAX);
rnd = min + range * rnd;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
Element result;
if (int_scale >= 0) {
rnd = double(std::llround(rnd * double(1 << int_scale))) / double(1 << int_scale);
result = static_cast<Element>(Real(rnd));
}
else {
result = static_cast<Element>(Real(rnd));
}
return result;
}
};
/// Partial specialization for initializing a complex value.
template <typename Element>
struct RandomUniformFunc<complex<Element> > {
using Real = typename RealType<Element>::Type;
uint64_t seed;
double range;
double min;
int int_scale;
//
// Methods
//
RandomUniformFunc(
uint64_t seed_ = 0,
double max = 1,
double min_ = 0,
int int_scale_ = -1
):
seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
complex<Element> operator()() const {
Element reals[2];
for (int i = 0; i < 2; ++i) {
double rnd = double(std::rand()) / double(RAND_MAX);
rnd = min + range * rnd;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
if (int_scale >= 0) {
rnd = double(int(rnd * double(1 << int_scale)));
reals[i] = from_real<Element>(Real(rnd / double(1 << int_scale)));
}
else {
reals[i] = from_real<Element>(Real(rnd));
}
}
return complex<Element>(reals[0], reals[1]);
}
};
/// Partial specialization for initializing a Quaternion value.
template <typename Element>
struct RandomUniformFunc<Quaternion<Element> > {
using Real = typename RealType<Element>::Type;
uint64_t seed;
double range;
double min;
int int_scale;
//
// Methods
//
RandomUniformFunc(
uint64_t seed_ = 0,
double max = 1,
double min_ = 0,
int int_scale_ = -1
):
seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
Quaternion<Element> operator()() const {
Element reals[4];
for (int i = 0; i < 4; ++i) {
double rnd = double(std::rand()) / double(RAND_MAX);
rnd = min + range * rnd;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
if (int_scale >= 0) {
rnd = double(int(rnd * double(1 << int_scale)));
reals[i] = from_real<Element>(Real(rnd / double(1 << int_scale)));
}
else {
reals[i] = from_real<Element>(Real(rnd));
}
}
return make_Quaternion(reals[0], reals[1], reals[2], reals[3]);
}
};
/// Computes a random uniform distribution
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillRandomUniformFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomUniformFunc<Element> func;
//
// Methods
//
/// Construction of uniform RNG functor.
TensorFillRandomUniformFunc(
TensorView view_ = TensorView(),
RandomUniformFunc<Element> func_ = RandomUniformFunc<Element>()
):
view(view_), func(func_) {
}
/// Compute random value and update RNG state
void operator()(Coord<Layout::kRank> const &coord) const {
view.at(coord) = func();
}
};
/// Fills the upper or lower part of a symmetric rank-2 tensor with random values of a uniform distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillSymmetricRandomUniformFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomUniformFunc<Element> func;
cutlass::FillMode fill_mode;
//
// Methods
//
/// Construction of uniform RNG functor.
TensorFillSymmetricRandomUniformFunc(
TensorView view_ = TensorView(),
RandomUniformFunc<Element> func_ = RandomUniformFunc<Element>(),
cutlass::FillMode fill_mode_ = cutlass::FillMode::kInvalid
):
view(view_), func(func_), fill_mode(fill_mode_) {
}
/// Compute random value and update RNG state
void operator()(Coord<Layout::kRank> const &coord) const {
// Fill half of matrix based on FillMode
if (Layout::kRank == 2 &&
fill_mode == cutlass::FillMode::kLower &&
coord[0] >= coord[1]) {
view.at(coord) = func();
} else if (Layout::kRank == 2 &&
fill_mode == cutlass::FillMode::kUpper &&
coord[0] <= coord[1]) {
view.at(coord) = func();
}
}
};
/// Computes a random Uniform distribution and pads diagonal with zeros
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillPadDiagonalRandomUniformFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomUniformFunc<Element> func;
cutlass::FillMode fill_mode;
int alignment;
//
// Methods
//
/// Construction of uniform RNG functor.
TensorFillPadDiagonalRandomUniformFunc(
TensorView view_ = TensorView(),
RandomUniformFunc<Element> func_ = RandomUniformFunc<Element>(),
cutlass::FillMode fill_mode_ = cutlass::FillMode::kInvalid,
int alignment_ = 1
):
view(view_), func(func_), fill_mode(fill_mode_), alignment(alignment_) {
}
/// Compute random value and update RNG state
void operator()(Coord<Layout::kRank> const &coord) const {
// Fill half of matrix based on FillMode
    if ((Layout::kRank == 2 &&
         (fill_mode == cutlass::FillMode::kLower) &&
         (coord[0] >= coord[1])) ||
        ((coord[1] - coord[0]) >= alignment)) {
      view.at(coord) = func();
    } else if ((Layout::kRank == 2 &&
                fill_mode == cutlass::FillMode::kUpper &&
                (coord[0] <= coord[1])) ||
               ((coord[0] - coord[1]) >= alignment)) {
view.at(coord) = func();
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values of a uniform random distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomUniform(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomUniformFunc<Element> random_func(seed, max, min, bits);
detail::TensorFillRandomUniformFunc<Element, Layout> func(
dst,
random_func
);
TensorForEach(
dst.extent(),
func
);
}
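// Example (illustrative sketch): filling a host tensor with uniform samples in
// [-4, 4]. Note the (max, min) argument order. The HostTensor allocation is an
// assumption made for the sake of the example.
//
//   cutlass::HostTensor<float, cutlass::layout::ColumnMajor> tensor({256, 128});
//   cutlass::reference::host::TensorFillRandomUniform(
//       tensor.host_view(), /*seed=*/7, /*max=*/4.0, /*min=*/-4.0);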
/// Fills a tensor with random values of a uniform random distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomUniform(
TensorViewPlanarComplex<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
TensorFillRandomUniform(dst.view_real(), seed, max, min, bits);
TensorFillRandomUniform(dst.view_imag(), ~seed, max, min, bits);
}
/// Fills a tensor with random values with a uniform random distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomUniform(
TensorView<Quaternion<Element>, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomUniformFunc<Quaternion<Element>> random_func(seed, max, min, bits);
detail::TensorFillRandomUniformFunc<Quaternion<Element>, Layout> func(
dst,
random_func
);
TensorForEach(
dst.extent(),
func
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values with a uniform random distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillSymmetricRandomUniform(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
cutlass::FillMode fill_mode, ///< FillMode for symmetric matrices
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomUniformFunc<Element> random_func(seed, max, min, bits);
detail::TensorFillSymmetricRandomUniformFunc<Element, Layout> func(
dst,
random_func,
fill_mode
);
TensorForEach(
dst.extent(),
func
);
}
/// Fills a tensor with random values with a uniform random distribution pads zeros along diagonal
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillPadDiagonalRandomUniform(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
cutlass::FillMode fill_mode, ///< FillMode for symmetric matrices
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1, ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
int alignment = 1
) {
detail::RandomUniformFunc<Element> random_func(seed, max, min, bits);
detail::TensorFillPadDiagonalRandomUniformFunc<Element, Layout> func(
dst,
random_func,
fill_mode,
alignment
);
TensorForEach(
dst.extent(),
func
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with a uniform value
template <
typename Element ///< Element type
>
void BlockFill(
Element *ptr,
size_t capacity,
Element val
) {
for (size_t i = 0; i < capacity; ++i) {
ReferenceFactory<Element>::get(ptr, i) = val;
}
}
/// Fills a tensor with random values with a uniform random distribution.
template <
typename Element ///< Element type
>
void BlockFillRandomUniform(
Element *ptr,
size_t capacity,
uint64_t seed, ///< seed for RNG
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomUniformFunc<Element> random_func(seed, max, min, bits);
for (size_t i = 0; i < capacity; ++i) {
ReferenceFactory<Element>::get(ptr, i) = random_func();
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillDiagonalFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
Element diag;
Element other;
//
// Methods
//
TensorFillDiagonalFunc(
TensorView const &view_ = TensorView(),
Element diag_ = Element(1),
Element other_ = Element(0)
):
view(view_), diag(diag_), other(other_) { }
void operator()(Coord<Layout::kRank> const & coord) const {
bool is_diag = true;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i] != coord[i - 1]) {
is_diag = false;
break;
}
}
view.at(coord) = (is_diag ? diag : other);
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor everywhere with a unique value for its diagonal.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillDiagonal(
TensorView<Element, Layout> dst, ///< destination tensor
Element diag = Element(1), ///< value to write in the diagonal
Element other = Element(0)) { ///< value to write off the diagonal
detail::TensorFillDiagonalFunc<Element, Layout> func(
dst,
diag,
other
);
TensorForEach(
dst.extent(),
func
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to fill a tensor's diagonal with 1 and 0 everywhere else.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillIdentity(
TensorView<Element, Layout> dst) { ///< destination tensor
TensorFillDiagonal(dst, Element(1), Element(0));
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Writes a uniform value to the diagonal of a tensor without modifying off-diagonal elements.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorUpdateDiagonal(
TensorView<Element, Layout> dst, ///< destination tensor
Element val = Element(1)) {
typename Layout::Index extent = dst.extent().min();
for (typename Layout::Index i = 0; i < extent; ++i) {
Coord<Layout::kRank> coord(i);
dst.at(coord) = val;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorUpdateOffDiagonalFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
Element other;
//
// Methods
//
TensorUpdateOffDiagonalFunc(
TensorView const &view_ = TensorView(),
Element other_ = Element(0)
):
view(view_), other(other_) { }
void operator()(Coord<Layout::kRank> const & coord) const {
bool is_diag = true;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i] != coord[i - 1]) {
is_diag = false;
break;
}
}
if (!is_diag) {
view.at(coord) = other;
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Writes a uniform value to all elements in the tensor without modifying diagonal elements.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorUpdateOffDiagonal(
TensorView<Element, Layout> dst, ///< destination tensor
Element other = Element(1)) {
detail::TensorUpdateOffDiagonalFunc<Element, Layout> func(
dst,
other
);
TensorForEach(
dst.extent(),
func
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillLinearFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
Array<Element, Layout::kRank> v;
Element s;
//
// Methods
//
TensorFillLinearFunc() { }
/// Constructs functor
TensorFillLinearFunc(
TensorView const &view_,
Array<Element, Layout::kRank> const & v_,
Element s_ = Element(0)
):
view(view_), v(v_), s(s_) { }
/// Updates the tensor
void operator()(Coord<Layout::kRank> const & coord) const {
Element sum(s);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank; ++i) {
sum += Element(coord[i]) * v[i];
}
view.at(coord) = sum;
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills tensor with a linear combination of its coordinate and another vector
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillLinear(
TensorView<Element, Layout> dst, ///< destination tensor
Array<Element, Layout::kRank> const & v,
Element s = Element(0)) {
detail::TensorFillLinearFunc<Element, Layout> func(
dst,
v,
s
);
TensorForEach(
dst.extent(),
func
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills tensor with a linear combination of its coordinate and another vector
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillSequential(
TensorView<Element, Layout> dst, ///< destination tensor
Element s = Element(0)) {
Array<Element, Layout::kRank> stride;
stride[0] = Element(1);
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
stride[i] = stride[i - 1] * Element(dst.extent()[i - 1]);
}
TensorFillLinear(dst, stride, s);
}
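// For a rank-2 tensor with extent (M, N), this assigns element (i, j) the value
// s + i + j * M, regardless of the tensor's memory layout.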
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values from a distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandom(
TensorView<Element, Layout> view, ///< destination tensor
uint64_t seed,
Distribution dist) {
using Real = typename RealType<Element>::Type;
if (dist.kind == Distribution::Gaussian) {
TensorFillRandomGaussian(
view,
seed,
dist.gaussian.mean,
dist.gaussian.stddev,
dist.int_scale);
} else if (dist.kind == Distribution::Uniform) {
TensorFillRandomUniform(
view,
seed,
dist.uniform.max,
dist.uniform.min,
dist.int_scale);
}
}
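// Illustrative usage sketch (not part of the original header). It assumes cutlass::Distribution
// from cutlass/util/distribution.h, whose set_uniform() / set_gaussian() helpers populate the
// fields read above:
//
//   cutlass::Distribution dist;
//   dist.set_uniform(-4.0, 4.0);   // min, max; the integer scale is left at its default
//
//   cutlass::reference::host::TensorFillRandom(tensor.host_view(), /*seed=*/2023ULL, dist);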
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a block of data with sequential elements
template <
typename Element
>
void BlockFillSequential(
Element *ptr,
int64_t capacity,
Element v = Element(1),
Element s = Element(0)) {
  int64_t i = 0;
while (i < capacity) {
cutlass::ReferenceFactory<Element, (cutlass::sizeof_bits<Element>::value <
8)>::get(ptr, i) = s;
s = Element(s + v);
++i;
}
}
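// Illustrative example (not part of the original header): the loop writes s, s + v, s + 2v, ...
// into consecutive elements, so the call below fills a host buffer with 0, 1, 2, ...:
//
//   std::vector<float> block(16);
//   cutlass::reference::host::BlockFillSequential(block.data(), int64_t(block.size()));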
/// Fills a block of data with sequential elements, each taken modulo a given value
template <
typename Element
>
void BlockFillSequentialModN(
Element *ptr,
int64_t capacity,
int64_t mod,
int64_t v = int64_t(1),
int64_t s = int64_t(0)) {
  int64_t i = 0;
while (i < capacity) {
cutlass::ReferenceFactory<Element, (cutlass::sizeof_bits<Element>::value <
8)>::get(ptr, i) = Element(s);
s = int64_t(s + v) % mod;
++i;
}
}
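// Illustrative example (not part of the original header): identical to BlockFillSequential except
// that the running value wraps modulo `mod`, which keeps integer test data within range. With
// mod = 4 and the default v = 1, s = 0, the buffer receives 0, 1, 2, 3, 0, 1, 2, 3, ...:
//
//   std::vector<int8_t> block(16);
//   cutlass::reference::host::BlockFillSequentialModN(block.data(), int64_t(block.size()),
//                                                     /*mod=*/4);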
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a block of data with random values sampled from the given distribution
template <
typename Element
>
void BlockFillRandom(
Element *ptr,
size_t capacity,
uint64_t seed,
Distribution dist) {
if (dist.kind == Distribution::Gaussian) {
BlockFillRandomGaussian<Element>(
ptr,
capacity,
seed,
dist.gaussian.mean,
dist.gaussian.stddev,
dist.int_scale,
dist.gaussian.pnz);
}
else if (dist.kind == Distribution::Uniform) {
BlockFillRandomUniform<Element>(
ptr,
capacity,
seed,
dist.uniform.max,
dist.uniform.min,
dist.int_scale);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element>
struct RandomSparseMetaFunc {
uint64_t seed;
int range;
int MetaSizeInBits;
//
// Methods
//
RandomSparseMetaFunc(
uint64_t seed_ = 0,
int MetaSizeInBits_ = 2
):
seed(seed_), MetaSizeInBits(MetaSizeInBits_) {
std::srand((unsigned)seed);
if (MetaSizeInBits_ == 2) {
range = 6;
}
else if (MetaSizeInBits_ == 4) {
range = 2;
}
else {
throw std::invalid_argument("Invalid MetaSizeInBits");
}
}
/// Compute random value and update RNG state
Element operator()() const {
Element FourToTwoMeta[6] = {0x4, 0x8, 0x9, 0xc, 0xd, 0xe};
Element TwoToOneMeta[2] = {0x4, 0xe};
Element * MetaArray = (MetaSizeInBits == 2) ? FourToTwoMeta : TwoToOneMeta;
Element result = 0x0;
for (int i = 0; i < cutlass::sizeof_bits<Element>::value / 4; ++i) {
int rnd = std::rand() % range;
Element meta = MetaArray[rnd];
result = (Element)(result | ((Element)(meta << (i * 4))));
}
return result;
}
};
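// Note on the encoding above (explanatory comment; the interpretation is a hedged reading of the
// tables, not normative documentation): each 4-bit nibble packs two 2-bit indices that select
// which two of four consecutive elements are kept in a 2:4 structured-sparse operand. The six
// legal nibbles decode to the index pairs
//
//   0x4 -> (0, 1)   0x8 -> (0, 2)   0x9 -> (1, 2)
//   0xc -> (0, 3)   0xd -> (1, 3)   0xe -> (2, 3)
//
// and operator() concatenates one randomly chosen nibble per 4 bits of Element. When
// MetaSizeInBits == 4, only the nibbles 0x4 and 0xe are drawn (range == 2 above).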
/// Computes a random sparse meta
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillRandomSparseMetaFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomSparseMetaFunc<Element> func;
//
// Methods
//
  /// Constructs the fill functor from a view and a metadata generator
TensorFillRandomSparseMetaFunc(
TensorView view_ = TensorView(),
RandomSparseMetaFunc<Element> func_ = RandomSparseMetaFunc<Element>()
):
view(view_), func(func_) {
}
  /// Writes a randomly generated metadata value at the given coordinate
void operator()(Coord<Layout::kRank> const &coord) const {
view.at(coord) = func();
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random sparse metadata, drawn uniformly from the set of valid encodings.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomSparseMeta(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
int MetaSizeInBits) { ///< 2 bit or 4 bit
detail::RandomSparseMetaFunc<Element> random_func(seed, MetaSizeInBits);
detail::TensorFillRandomSparseMetaFunc<Element, Layout> func(
dst,
random_func
);
TensorForEach(
dst.extent(),
func
);
}
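// Illustrative usage sketch (not part of the original header). Metadata for sparse tensor-core
// kernels is commonly stored as 16-bit integers; the element type and extent below are assumptions
// chosen for the example:
//
//   cutlass::HostTensor<uint16_t, cutlass::layout::RowMajor> meta({128, 16});
//   cutlass::reference::host::TensorFillRandomSparseMeta(meta.host_view(), /*seed=*/2023ULL,
//                                                        /*MetaSizeInBits=*/2);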
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a block of memory with random sparse metadata, drawn uniformly from the set of valid encodings.
template <
typename Element ///< Element type
>
void BlockFillRandomSparseMeta(
Element *ptr,
size_t capacity,
uint64_t seed, ///< seed for RNG
  int MetaSizeInBits) {               ///< 2 bit or 4 bit
detail::RandomSparseMetaFunc<Element> random_func(seed, MetaSizeInBits);
for (size_t i = 0; i < capacity; ++i) {
ptr[i] = random_func();
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills an ELL block index matrix with random column indices drawn from a uniform distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomEllIdx(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
int rows, int ell_cols, int cols) { ///< dimension of the matrix
std::srand((unsigned)seed);
for (int i = 0; i < rows; ++i) {
int col_idx = std::rand() % cols;
for (int j = 0; j < ell_cols; ++j) {
dst.at({i, j}) = col_idx;
if (col_idx != -1) {
if (col_idx == (cols - 1)) {
col_idx = -1;
} else {
col_idx = std::rand() % (cols - col_idx - 1) + col_idx + 1;
}
}
}
}
}
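// Behavior note (explanatory comment): for every row, the loop above emits a strictly increasing
// sequence of random column indices; once the final column (cols - 1) has been used, the remaining
// slots of that row are filled with -1. Treating -1 as an "empty block" marker in the
// Blocked-Ellpack format is an assumption about how downstream code interprets the index matrix.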
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies a diagonal in from host memory without modifying off-diagonal elements.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorCopyDiagonalIn(
TensorView<Element, Layout> dst, ///< destination tensor
Element const *ptr) { ///< dense buffer of elements
typename Layout::Index extent = dst.extent().min();
for (typename Layout::Index i = 0; i < extent; ++i) {
Coord<Layout::kRank> coord(i);
dst.at(coord) = ReferenceFactory<Element>::get(ptr, i);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies the diagonal of a tensor into a dense buffer in host memory.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorCopyDiagonalOut(
Element *ptr, ///< dense buffer of elements
TensorView<Element, Layout> src) { ///< source tensor
typename Layout::Index extent = src.extent().min();
for (typename Layout::Index i = 0; i < extent; ++i) {
Coord<Layout::kRank> coord(i);
ReferenceFactory<Element>::get(ptr, i) = src.at(coord);
}
}
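// Illustrative round-trip sketch for the two diagonal-copy helpers above (not part of the original
// header); the length of the host buffer follows from extent().min():
//
//   cutlass::HostTensor<float, cutlass::layout::RowMajor> tensor({4, 8});
//   std::vector<float> diag(4);
//
//   cutlass::reference::host::TensorCopyDiagonalOut(diag.data(), tensor.host_view());  // read
//   cutlass::reference::host::TensorCopyDiagonalIn(tensor.host_view(), diag.data());   // write back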
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass